Mirror of https://github.com/Luzifer/share.git

Deps: Update vendored packages

Signed-off-by: Knut Ahlers <knut@ahlers.me>
Knut Ahlers 2018-10-11 20:17:46 +02:00
parent 8af31b8e6b
commit 83c43a467c
Signed by: luzifer
GPG key ID: DC2729FDD34BE99E
129 changed files with 4295 additions and 705 deletions

Gopkg.lock (generated)

@ -10,7 +10,7 @@
version = "v2.2.0"
[[projects]]
digest = "1:0b03c73fd48c21ae75250c872d34acdec99ec9ef22dc1ba705cf974c46097ef3"
digest = "1:8d903403288d74ead5312f02a5c5d9d8c53a86a77f5916c2a90a0606d9fcfa2e"
name = "github.com/aws/aws-sdk-go"
packages = [
"aws",
@ -30,6 +30,7 @@
"aws/request",
"aws/session",
"aws/signer/v4",
"internal/s3err",
"internal/sdkio",
"internal/sdkrand",
"internal/sdkuri",
@ -46,8 +47,8 @@
"service/sts",
]
pruneopts = "NUT"
revision = "3dd4f56d3cb9d194293525540562216f81bd3f27"
version = "v1.15.37"
revision = "46fe04784968cd7b54e859fcce45753456f736ac"
version = "v1.15.52"
[[projects]]
digest = "1:40a1cbb226e60770882b41e5b8e7f5c38426399c602fe75618de717e70dcb871"
@ -58,12 +59,12 @@
version = "v1.0.25"
[[projects]]
digest = "1:74d9b0a7b4107b41e0ade759fac64502876f82d29fb23d77b3dd24b194ee3dd5"
digest = "1:d32823ccbd16481c42356a1ae7f2284761da19cae3131b554a0b7c086a650292"
name = "github.com/go-ini/ini"
packages = ["."]
pruneopts = "NUT"
revision = "5cf292cae48347c2490ac1a58fe36735fb78df7e"
version = "v1.38.2"
revision = "7b294651033cd7d9e7f0d9ffa1b75ed1e198e737"
version = "v1.38.3"
[[projects]]
digest = "1:ac6d01547ec4f7f673311b4663909269bfb8249952de3279799289467837c3cc"
@ -72,6 +73,14 @@
pruneopts = "NUT"
revision = "0b12d6b5"
[[projects]]
digest = "1:4059c14e87a2de3a434430340521b5feece186c1469eff0834c29a63870de3ed"
name = "github.com/konsorten/go-windows-terminal-sequences"
packages = ["."]
pruneopts = "NUT"
revision = "5c8c8bd35d3832f5d134ae1e1e375b69a4d25242"
version = "v1.0.1"
[[projects]]
digest = "1:bff482b22ebed387378546ba6a7850fdef87fd47f8ee58a7c62124a8e889a56b"
name = "github.com/mattn/go-runewidth"
@ -81,20 +90,20 @@
version = "v0.0.3"
[[projects]]
digest = "1:b2339e83ce9b5c4f79405f949429a7f68a9a904fed903c672aac1e7ceb7f5f02"
digest = "1:ecf78eacf406c42f07f66d6b79fda24d2b92dc711bfd0760d0c931678f9621fe"
name = "github.com/sirupsen/logrus"
packages = ["."]
pruneopts = "NUT"
revision = "3e01752db0189b9157070a0e1668a620f9a85da2"
version = "v1.0.6"
revision = "ad15b42461921f1fb3529b058c6786c6a45d5162"
version = "v1.1.1"
[[projects]]
digest = "1:e3707aeaccd2adc89eba6c062fec72116fe1fc1ba71097da85b4d8ae1668a675"
digest = "1:9d8420bbf131d1618bde6530af37c3799340d3762cc47210c1d9532a4c3a2779"
name = "github.com/spf13/pflag"
packages = ["."]
pruneopts = "NUT"
revision = "9a97c102cda95a86cec2345a6f09f55a939babf5"
version = "v1.0.2"
revision = "298182f68c66c05229eb03ac171abe6e309ee79a"
version = "v1.0.3"
[[projects]]
branch = "master"
@ -102,18 +111,18 @@
name = "golang.org/x/crypto"
packages = ["ssh/terminal"]
pruneopts = "NUT"
revision = "0e37d006457bf46f9e6692014ba72ef82c33022c"
revision = "7c1a557ab941a71c619514f229f0b27ccb0c27cf"
[[projects]]
branch = "master"
digest = "1:4bdbf3a743b920834a5425755782c3ea9bc0e0f2dc1c64401b9b03c825799a14"
digest = "1:71bcac4a94abe8132ef9c2bf008cca9579cda20194383b210b8d2e125777e982"
name = "golang.org/x/sys"
packages = [
"unix",
"windows",
]
pruneopts = "NUT"
revision = "ee1b12c67af419cf5a9be3bdbeea7fc1c5f32f11"
revision = "fa43e7bc11baaae89f3f902b2b4d832b68234844"
[[projects]]
branch = "v2"


@ -18,7 +18,7 @@ const UseServiceDefaultRetries = -1
type RequestRetryer interface{}
// A Config provides service configuration for service clients. By default,
// all clients will use the defaults.DefaultConfig tructure.
// all clients will use the defaults.DefaultConfig structure.
//
// // Create Session with MaxRetry configuration to be shared by multiple
// // service clients.


@ -65,6 +65,10 @@ type Provider struct {
//
// If ExpiryWindow is 0 or less it will be ignored.
ExpiryWindow time.Duration
// Optional authorization token value. If set, it will be used as the value
// of the Authorization header of the endpoint credential request.
AuthorizationToken string
}
// NewProviderClient returns a credentials Provider for retrieving AWS credentials
@ -152,6 +156,9 @@ func (p *Provider) getCredentials() (*getCredentialsOutput, error) {
out := &getCredentialsOutput{}
req := p.Client.NewRequest(op, nil, out)
req.HTTPRequest.Header.Set("Accept", "application/json")
if authToken := p.AuthorizationToken; len(authToken) != 0 {
req.HTTPRequest.Header.Set("Authorization", authToken)
}
return out, req.Send()
}
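
The new AuthorizationToken field lets callers of the generic endpoint credentials provider send a bearer-style token along with the credential request. A minimal sketch of wiring it up by hand, assuming a placeholder endpoint URL and token (neither appears in this commit):

// Sketch only: builds the endpoint credentials provider directly and sets
// the new AuthorizationToken field. Endpoint URL and token are placeholders.
package main

import (
    "fmt"

    "github.com/aws/aws-sdk-go/aws/credentials"
    "github.com/aws/aws-sdk-go/aws/credentials/endpointcreds"
    "github.com/aws/aws-sdk-go/aws/defaults"
)

func main() {
    cfg := defaults.Config()
    handlers := defaults.Handlers()

    p := endpointcreds.NewProviderClient(*cfg, handlers,
        "http://localhost:8080/creds", // placeholder credential endpoint
        func(p *endpointcreds.Provider) {
            // Sent verbatim as the Authorization header of the request.
            p.AuthorizationToken = "example-token"
        },
    )

    v, err := credentials.NewCredentials(p).Get()
    if err != nil {
        fmt.Println("credential fetch failed:", err)
        return
    }
    fmt.Println("resolved access key:", v.AccessKeyID)
}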


@ -48,4 +48,6 @@ type metric struct {
DNSLatency *int `json:"DnsLatency,omitempty"`
TCPLatency *int `json:"TcpLatency,omitempty"`
SSLLatency *int `json:"SslLatency,omitempty"`
MaxRetriesExceeded *int `json:"MaxRetriesExceeded,omitempty"`
}


@ -112,14 +112,16 @@ func (rep *Reporter) sendAPICallMetric(r *request.Request) {
now := time.Now()
m := metric{
ClientID: aws.String(rep.clientID),
API: aws.String(r.Operation.Name),
Service: aws.String(r.ClientInfo.ServiceID),
Timestamp: (*metricTime)(&now),
Type: aws.String("ApiCall"),
AttemptCount: aws.Int(r.RetryCount + 1),
Latency: aws.Int(int(time.Now().Sub(r.Time) / time.Millisecond)),
XAmzRequestID: aws.String(r.RequestID),
ClientID: aws.String(rep.clientID),
API: aws.String(r.Operation.Name),
Service: aws.String(r.ClientInfo.ServiceID),
Timestamp: (*metricTime)(&now),
Type: aws.String("ApiCall"),
AttemptCount: aws.Int(r.RetryCount + 1),
Region: r.Config.Region,
Latency: aws.Int(int(time.Now().Sub(r.Time) / time.Millisecond)),
XAmzRequestID: aws.String(r.RequestID),
MaxRetriesExceeded: aws.Int(boolIntValue(r.RetryCount >= r.MaxRetries())),
}
// TODO: Probably want to figure something out for logging dropped
@ -229,3 +231,12 @@ func (rep *Reporter) InjectHandlers(handlers *request.Handlers) {
handlers.AfterRetry.PushFrontNamed(apiCallAttemptHandler)
}
// boolIntValue returns 1 for true and 0 for false.
func boolIntValue(b bool) int {
if b {
return 1
}
return 0
}
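
For context, these ApiCall metrics (now carrying Region and MaxRetriesExceeded) are emitted by the SDK's client-side monitoring reporter. A hedged sketch of enabling it for a session; the client ID and UDP address below are placeholders:

// Sketch only: starts the CSM reporter and hooks it into a session so that
// service clients built from it emit ApiCall/ApiCallAttempt metrics over UDP.
package main

import (
    "log"

    "github.com/aws/aws-sdk-go/aws/csm"
    "github.com/aws/aws-sdk-go/aws/session"
)

func main() {
    r, err := csm.Start("example-client-id", "127.0.0.1:31000")
    if err != nil {
        log.Fatal(err)
    }

    sess := session.Must(session.NewSession())
    // Hook the reporter into the session's handler chain.
    r.InjectHandlers(&sess.Handlers)

    // Service clients created from sess now report metrics to the agent.
}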


@ -112,8 +112,9 @@ func CredProviders(cfg *aws.Config, handlers request.Handlers) []credentials.Pro
}
const (
httpProviderEnvVar = "AWS_CONTAINER_CREDENTIALS_FULL_URI"
ecsCredsProviderEnvVar = "AWS_CONTAINER_CREDENTIALS_RELATIVE_URI"
httpProviderAuthorizationEnvVar = "AWS_CONTAINER_AUTHORIZATION_TOKEN"
httpProviderEnvVar = "AWS_CONTAINER_CREDENTIALS_FULL_URI"
ecsCredsProviderEnvVar = "AWS_CONTAINER_CREDENTIALS_RELATIVE_URI"
)
// RemoteCredProvider returns a credentials provider for the default remote
@ -187,6 +188,7 @@ func httpCredProvider(cfg aws.Config, handlers request.Handlers, u string) crede
return endpointcreds.NewProviderClient(cfg, handlers, u,
func(p *endpointcreds.Provider) {
p.ExpiryWindow = 5 * time.Minute
p.AuthorizationToken = os.Getenv(httpProviderAuthorizationEnvVar)
},
)
}
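
With this change, setting AWS_CONTAINER_AUTHORIZATION_TOKEN alongside AWS_CONTAINER_CREDENTIALS_FULL_URI makes the default credential chain attach that token as the Authorization header. A rough sketch of the environment-variable driven path; endpoint and token values are placeholders:

// Sketch only: the default chain resolves these variables via
// RemoteCredProvider; the endpoint URL and token are placeholders.
package main

import (
    "fmt"
    "os"

    "github.com/aws/aws-sdk-go/aws/session"
)

func main() {
    os.Setenv("AWS_CONTAINER_CREDENTIALS_FULL_URI", "http://localhost:8080/creds")
    os.Setenv("AWS_CONTAINER_AUTHORIZATION_TOKEN", "example-token")

    sess := session.Must(session.NewSession())
    v, err := sess.Config.Credentials.Get()
    if err != nil {
        fmt.Println("credential lookup failed:", err)
        return
    }
    fmt.Println("credentials resolved by:", v.ProviderName)
}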


@ -72,6 +72,7 @@ func NewClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio
cfg,
metadata.ClientInfo{
ServiceName: ServiceName,
ServiceID: ServiceName,
Endpoint: endpoint,
APIVersion: "latest",
},


@ -1415,10 +1415,11 @@ var awsPartition = partition{
"iotanalytics": service{
Endpoints: endpoints{
"eu-west-1": endpoint{},
"us-east-1": endpoint{},
"us-east-2": endpoint{},
"us-west-2": endpoint{},
"ap-northeast-1": endpoint{},
"eu-west-1": endpoint{},
"us-east-1": endpoint{},
"us-east-2": endpoint{},
"us-west-2": endpoint{},
},
},
"kinesis": service{
@ -2109,11 +2110,16 @@ var awsPartition = partition{
Endpoints: endpoints{
"ap-northeast-1": endpoint{},
"ap-northeast-2": endpoint{},
"ap-south-1": endpoint{},
"ap-southeast-1": endpoint{},
"ap-southeast-2": endpoint{},
"ca-central-1": endpoint{},
"eu-central-1": endpoint{},
"eu-west-1": endpoint{},
"eu-west-2": endpoint{},
"eu-west-3": endpoint{},
"sa-east-1": endpoint{},
"us-east-1": endpoint{},
"us-east-2": endpoint{},
"us-west-1": endpoint{},
@ -2618,6 +2624,13 @@ var awscnPartition = partition{
"cn-northwest-1": endpoint{},
},
},
"codebuild": service{
Endpoints: endpoints{
"cn-north-1": endpoint{},
"cn-northwest-1": endpoint{},
},
},
"codedeploy": service{
Endpoints: endpoints{


@ -17,6 +17,10 @@ const (
ParamMinValueErrCode = "ParamMinValueError"
// ParamMinLenErrCode is the error code for fields without enough elements.
ParamMinLenErrCode = "ParamMinLenError"
// ParamFormatErrCode is the error code for a field with invalid
// format or characters.
ParamFormatErrCode = "ParamFormatInvalidError"
)
// Validator provides a way for types to perform validation logic on their
@ -232,3 +236,26 @@ func NewErrParamMinLen(field string, min int) *ErrParamMinLen {
func (e *ErrParamMinLen) MinLen() int {
return e.min
}
// An ErrParamFormat represents an invalid format parameter error.
type ErrParamFormat struct {
errInvalidParam
format string
}
// NewErrParamFormat creates a new invalid format parameter error.
func NewErrParamFormat(field string, format, value string) *ErrParamFormat {
return &ErrParamFormat{
errInvalidParam: errInvalidParam{
code: ParamFormatErrCode,
field: field,
msg: fmt.Sprintf("format %v, %v", format, value),
},
format: format,
}
}
// Format returns the field's required format.
func (e *ErrParamFormat) Format() string {
return e.format
}
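
A quick illustration of the new validation error; the field, format, and value strings are arbitrary examples:

// Sketch only: constructs the new parameter-format error and reads it back.
package main

import (
    "fmt"

    "github.com/aws/aws-sdk-go/aws/request"
)

func main() {
    err := request.NewErrParamFormat("AccountId", "accountId", "not-a-number")

    fmt.Println(err.Code())   // ParamFormatInvalidError
    fmt.Println(err.Field())  // AccountId
    fmt.Println(err.Format()) // accountId
    fmt.Println(err.Error())
}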


@ -5,4 +5,4 @@ package aws
const SDKName = "aws-sdk-go"
// SDKVersion is the version of this SDK
const SDKVersion = "1.15.37"
const SDKVersion = "1.15.52"


@ -0,0 +1,57 @@
package s3err
import (
"fmt"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/aws/request"
)
// RequestFailure provides additional S3 specific metadata for the request
// failure.
type RequestFailure struct {
awserr.RequestFailure
hostID string
}
// NewRequestFailure returns a request failure error decorated with S3
// specific metadata.
func NewRequestFailure(err awserr.RequestFailure, hostID string) *RequestFailure {
return &RequestFailure{RequestFailure: err, hostID: hostID}
}
func (r RequestFailure) Error() string {
extra := fmt.Sprintf("status code: %d, request id: %s, host id: %s",
r.StatusCode(), r.RequestID(), r.hostID)
return awserr.SprintError(r.Code(), r.Message(), extra, r.OrigErr())
}
func (r RequestFailure) String() string {
return r.Error()
}
// HostID returns the HostID request response value.
func (r RequestFailure) HostID() string {
return r.hostID
}
// RequestFailureWrapperHandler returns a handler to wrap an
// awserr.RequestFailure with the S3 extended request ID (X-Amz-Id-2) from the response.
func RequestFailureWrapperHandler() request.NamedHandler {
return request.NamedHandler{
Name: "awssdk.s3.errorHandler",
Fn: func(req *request.Request) {
reqErr, ok := req.Error.(awserr.RequestFailure)
if !ok || reqErr == nil {
return
}
hostID := req.HTTPResponse.Header.Get("X-Amz-Id-2")
if req.Error == nil {
return
}
req.Error = NewRequestFailure(reqErr, hostID)
},
}
}
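
Assuming the S3 client installs this wrapper in its handler list, a failed request's error can expose the extended request ID. A hedged sketch of reading it back without importing the internal package; bucket and key are placeholders:

// Sketch only: type-asserts an S3 error against a small interface to reach
// the host ID (X-Amz-Id-2) added by the wrapper. Bucket and key are fake.
package main

import (
    "fmt"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/session"
    "github.com/aws/aws-sdk-go/service/s3"
)

func main() {
    sess := session.Must(session.NewSession())
    svc := s3.New(sess)

    _, err := svc.GetObject(&s3.GetObjectInput{
        Bucket: aws.String("example-bucket"),
        Key:    aws.String("missing-key"),
    })
    if err != nil {
        if hostIDer, ok := err.(interface{ HostID() string }); ok {
            fmt.Println("host id:", hostIDer.HostID())
        }
        fmt.Println(err)
    }
}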


@ -0,0 +1,21 @@
package protocol
// ValidHostLabel returns if the label is a valid RFC 1123 Section 2.1 domain
// host label name.
func ValidHostLabel(label string) bool {
if l := len(label); l == 0 || l > 63 {
return false
}
for _, r := range label {
switch {
case r >= '0' && r <= '9':
case r >= 'A' && r <= 'Z':
case r >= 'a' && r <= 'z':
case r == '-':
default:
return false
}
}
return true
}
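
A short illustration of what the helper accepts and rejects; the import path assumes this file lives in the SDK's private/protocol package:

// Sketch only: exercises ValidHostLabel with a few sample labels.
package main

import (
    "fmt"

    "github.com/aws/aws-sdk-go/private/protocol"
)

func main() {
    for _, label := range []string{"my-bucket-1", "UPPER-ok", "", "has.dot", "under_score"} {
        fmt.Printf("%q -> %v\n", label, protocol.ValidHostLabel(label))
    }
}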


@ -27,7 +27,7 @@ const opAbortMultipartUpload = "AbortMultipartUpload"
// AbortMultipartUploadRequest generates a "aws/request.Request" representing the
// client's request for the AbortMultipartUpload operation. The "output" return
// value will be populated with the request's response once the request completes
// successfuly.
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
@ -110,7 +110,7 @@ const opCompleteMultipartUpload = "CompleteMultipartUpload"
// CompleteMultipartUploadRequest generates a "aws/request.Request" representing the
// client's request for the CompleteMultipartUpload operation. The "output" return
// value will be populated with the request's response once the request completes
// successfuly.
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
@ -184,7 +184,7 @@ const opCopyObject = "CopyObject"
// CopyObjectRequest generates a "aws/request.Request" representing the
// client's request for the CopyObject operation. The "output" return
// value will be populated with the request's response once the request completes
// successfuly.
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
@ -264,7 +264,7 @@ const opCreateBucket = "CreateBucket"
// CreateBucketRequest generates a "aws/request.Request" representing the
// client's request for the CreateBucket operation. The "output" return
// value will be populated with the request's response once the request completes
// successfuly.
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
@ -346,7 +346,7 @@ const opCreateMultipartUpload = "CreateMultipartUpload"
// CreateMultipartUploadRequest generates a "aws/request.Request" representing the
// client's request for the CreateMultipartUpload operation. The "output" return
// value will be populated with the request's response once the request completes
// successfuly.
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
@ -426,7 +426,7 @@ const opDeleteBucket = "DeleteBucket"
// DeleteBucketRequest generates a "aws/request.Request" representing the
// client's request for the DeleteBucket operation. The "output" return
// value will be populated with the request's response once the request completes
// successfuly.
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
@ -503,7 +503,7 @@ const opDeleteBucketAnalyticsConfiguration = "DeleteBucketAnalyticsConfiguration
// DeleteBucketAnalyticsConfigurationRequest generates a "aws/request.Request" representing the
// client's request for the DeleteBucketAnalyticsConfiguration operation. The "output" return
// value will be populated with the request's response once the request completes
// successfuly.
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
@ -580,7 +580,7 @@ const opDeleteBucketCors = "DeleteBucketCors"
// DeleteBucketCorsRequest generates a "aws/request.Request" representing the
// client's request for the DeleteBucketCors operation. The "output" return
// value will be populated with the request's response once the request completes
// successfuly.
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
@ -656,7 +656,7 @@ const opDeleteBucketEncryption = "DeleteBucketEncryption"
// DeleteBucketEncryptionRequest generates a "aws/request.Request" representing the
// client's request for the DeleteBucketEncryption operation. The "output" return
// value will be populated with the request's response once the request completes
// successfuly.
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
@ -732,7 +732,7 @@ const opDeleteBucketInventoryConfiguration = "DeleteBucketInventoryConfiguration
// DeleteBucketInventoryConfigurationRequest generates a "aws/request.Request" representing the
// client's request for the DeleteBucketInventoryConfiguration operation. The "output" return
// value will be populated with the request's response once the request completes
// successfuly.
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
@ -809,7 +809,7 @@ const opDeleteBucketLifecycle = "DeleteBucketLifecycle"
// DeleteBucketLifecycleRequest generates a "aws/request.Request" representing the
// client's request for the DeleteBucketLifecycle operation. The "output" return
// value will be populated with the request's response once the request completes
// successfuly.
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
@ -885,7 +885,7 @@ const opDeleteBucketMetricsConfiguration = "DeleteBucketMetricsConfiguration"
// DeleteBucketMetricsConfigurationRequest generates a "aws/request.Request" representing the
// client's request for the DeleteBucketMetricsConfiguration operation. The "output" return
// value will be populated with the request's response once the request completes
// successfuly.
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
@ -962,7 +962,7 @@ const opDeleteBucketPolicy = "DeleteBucketPolicy"
// DeleteBucketPolicyRequest generates a "aws/request.Request" representing the
// client's request for the DeleteBucketPolicy operation. The "output" return
// value will be populated with the request's response once the request completes
// successfuly.
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
@ -1038,7 +1038,7 @@ const opDeleteBucketReplication = "DeleteBucketReplication"
// DeleteBucketReplicationRequest generates a "aws/request.Request" representing the
// client's request for the DeleteBucketReplication operation. The "output" return
// value will be populated with the request's response once the request completes
// successfuly.
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
@ -1114,7 +1114,7 @@ const opDeleteBucketTagging = "DeleteBucketTagging"
// DeleteBucketTaggingRequest generates a "aws/request.Request" representing the
// client's request for the DeleteBucketTagging operation. The "output" return
// value will be populated with the request's response once the request completes
// successfuly.
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
@ -1190,7 +1190,7 @@ const opDeleteBucketWebsite = "DeleteBucketWebsite"
// DeleteBucketWebsiteRequest generates a "aws/request.Request" representing the
// client's request for the DeleteBucketWebsite operation. The "output" return
// value will be populated with the request's response once the request completes
// successfuly.
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
@ -1266,7 +1266,7 @@ const opDeleteObject = "DeleteObject"
// DeleteObjectRequest generates a "aws/request.Request" representing the
// client's request for the DeleteObject operation. The "output" return
// value will be populated with the request's response once the request completes
// successfuly.
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
@ -1342,7 +1342,7 @@ const opDeleteObjectTagging = "DeleteObjectTagging"
// DeleteObjectTaggingRequest generates a "aws/request.Request" representing the
// client's request for the DeleteObjectTagging operation. The "output" return
// value will be populated with the request's response once the request completes
// successfuly.
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
@ -1416,7 +1416,7 @@ const opDeleteObjects = "DeleteObjects"
// DeleteObjectsRequest generates a "aws/request.Request" representing the
// client's request for the DeleteObjects operation. The "output" return
// value will be populated with the request's response once the request completes
// successfuly.
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
@ -1491,7 +1491,7 @@ const opGetBucketAccelerateConfiguration = "GetBucketAccelerateConfiguration"
// GetBucketAccelerateConfigurationRequest generates a "aws/request.Request" representing the
// client's request for the GetBucketAccelerateConfiguration operation. The "output" return
// value will be populated with the request's response once the request completes
// successfuly.
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
@ -1565,7 +1565,7 @@ const opGetBucketAcl = "GetBucketAcl"
// GetBucketAclRequest generates a "aws/request.Request" representing the
// client's request for the GetBucketAcl operation. The "output" return
// value will be populated with the request's response once the request completes
// successfuly.
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
@ -1639,7 +1639,7 @@ const opGetBucketAnalyticsConfiguration = "GetBucketAnalyticsConfiguration"
// GetBucketAnalyticsConfigurationRequest generates a "aws/request.Request" representing the
// client's request for the GetBucketAnalyticsConfiguration operation. The "output" return
// value will be populated with the request's response once the request completes
// successfuly.
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
@ -1714,7 +1714,7 @@ const opGetBucketCors = "GetBucketCors"
// GetBucketCorsRequest generates a "aws/request.Request" representing the
// client's request for the GetBucketCors operation. The "output" return
// value will be populated with the request's response once the request completes
// successfuly.
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
@ -1788,7 +1788,7 @@ const opGetBucketEncryption = "GetBucketEncryption"
// GetBucketEncryptionRequest generates a "aws/request.Request" representing the
// client's request for the GetBucketEncryption operation. The "output" return
// value will be populated with the request's response once the request completes
// successfuly.
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
@ -1862,7 +1862,7 @@ const opGetBucketInventoryConfiguration = "GetBucketInventoryConfiguration"
// GetBucketInventoryConfigurationRequest generates a "aws/request.Request" representing the
// client's request for the GetBucketInventoryConfiguration operation. The "output" return
// value will be populated with the request's response once the request completes
// successfuly.
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
@ -1937,7 +1937,7 @@ const opGetBucketLifecycle = "GetBucketLifecycle"
// GetBucketLifecycleRequest generates a "aws/request.Request" representing the
// client's request for the GetBucketLifecycle operation. The "output" return
// value will be populated with the request's response once the request completes
// successfuly.
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
@ -2020,7 +2020,7 @@ const opGetBucketLifecycleConfiguration = "GetBucketLifecycleConfiguration"
// GetBucketLifecycleConfigurationRequest generates a "aws/request.Request" representing the
// client's request for the GetBucketLifecycleConfiguration operation. The "output" return
// value will be populated with the request's response once the request completes
// successfuly.
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
@ -2094,7 +2094,7 @@ const opGetBucketLocation = "GetBucketLocation"
// GetBucketLocationRequest generates a "aws/request.Request" representing the
// client's request for the GetBucketLocation operation. The "output" return
// value will be populated with the request's response once the request completes
// successfuly.
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
@ -2168,7 +2168,7 @@ const opGetBucketLogging = "GetBucketLogging"
// GetBucketLoggingRequest generates a "aws/request.Request" representing the
// client's request for the GetBucketLogging operation. The "output" return
// value will be populated with the request's response once the request completes
// successfuly.
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
@ -2243,7 +2243,7 @@ const opGetBucketMetricsConfiguration = "GetBucketMetricsConfiguration"
// GetBucketMetricsConfigurationRequest generates a "aws/request.Request" representing the
// client's request for the GetBucketMetricsConfiguration operation. The "output" return
// value will be populated with the request's response once the request completes
// successfuly.
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
@ -2318,7 +2318,7 @@ const opGetBucketNotification = "GetBucketNotification"
// GetBucketNotificationRequest generates a "aws/request.Request" representing the
// client's request for the GetBucketNotification operation. The "output" return
// value will be populated with the request's response once the request completes
// successfuly.
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
@ -2401,7 +2401,7 @@ const opGetBucketNotificationConfiguration = "GetBucketNotificationConfiguration
// GetBucketNotificationConfigurationRequest generates a "aws/request.Request" representing the
// client's request for the GetBucketNotificationConfiguration operation. The "output" return
// value will be populated with the request's response once the request completes
// successfuly.
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
@ -2475,7 +2475,7 @@ const opGetBucketPolicy = "GetBucketPolicy"
// GetBucketPolicyRequest generates a "aws/request.Request" representing the
// client's request for the GetBucketPolicy operation. The "output" return
// value will be populated with the request's response once the request completes
// successfuly.
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
@ -2549,7 +2549,7 @@ const opGetBucketReplication = "GetBucketReplication"
// GetBucketReplicationRequest generates a "aws/request.Request" representing the
// client's request for the GetBucketReplication operation. The "output" return
// value will be populated with the request's response once the request completes
// successfuly.
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
@ -2623,7 +2623,7 @@ const opGetBucketRequestPayment = "GetBucketRequestPayment"
// GetBucketRequestPaymentRequest generates a "aws/request.Request" representing the
// client's request for the GetBucketRequestPayment operation. The "output" return
// value will be populated with the request's response once the request completes
// successfuly.
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
@ -2697,7 +2697,7 @@ const opGetBucketTagging = "GetBucketTagging"
// GetBucketTaggingRequest generates a "aws/request.Request" representing the
// client's request for the GetBucketTagging operation. The "output" return
// value will be populated with the request's response once the request completes
// successfuly.
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
@ -2771,7 +2771,7 @@ const opGetBucketVersioning = "GetBucketVersioning"
// GetBucketVersioningRequest generates a "aws/request.Request" representing the
// client's request for the GetBucketVersioning operation. The "output" return
// value will be populated with the request's response once the request completes
// successfuly.
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
@ -2845,7 +2845,7 @@ const opGetBucketWebsite = "GetBucketWebsite"
// GetBucketWebsiteRequest generates a "aws/request.Request" representing the
// client's request for the GetBucketWebsite operation. The "output" return
// value will be populated with the request's response once the request completes
// successfuly.
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
@ -2919,7 +2919,7 @@ const opGetObject = "GetObject"
// GetObjectRequest generates a "aws/request.Request" representing the
// client's request for the GetObject operation. The "output" return
// value will be populated with the request's response once the request completes
// successfuly.
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
@ -2998,7 +2998,7 @@ const opGetObjectAcl = "GetObjectAcl"
// GetObjectAclRequest generates a "aws/request.Request" representing the
// client's request for the GetObjectAcl operation. The "output" return
// value will be populated with the request's response once the request completes
// successfuly.
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
@ -3077,7 +3077,7 @@ const opGetObjectTagging = "GetObjectTagging"
// GetObjectTaggingRequest generates a "aws/request.Request" representing the
// client's request for the GetObjectTagging operation. The "output" return
// value will be populated with the request's response once the request completes
// successfuly.
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
@ -3151,7 +3151,7 @@ const opGetObjectTorrent = "GetObjectTorrent"
// GetObjectTorrentRequest generates a "aws/request.Request" representing the
// client's request for the GetObjectTorrent operation. The "output" return
// value will be populated with the request's response once the request completes
// successfuly.
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
@ -3225,7 +3225,7 @@ const opHeadBucket = "HeadBucket"
// HeadBucketRequest generates a "aws/request.Request" representing the
// client's request for the HeadBucket operation. The "output" return
// value will be populated with the request's response once the request completes
// successfuly.
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
@ -3307,7 +3307,7 @@ const opHeadObject = "HeadObject"
// HeadObjectRequest generates a "aws/request.Request" representing the
// client's request for the HeadObject operation. The "output" return
// value will be populated with the request's response once the request completes
// successfuly.
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
@ -3386,7 +3386,7 @@ const opListBucketAnalyticsConfigurations = "ListBucketAnalyticsConfigurations"
// ListBucketAnalyticsConfigurationsRequest generates a "aws/request.Request" representing the
// client's request for the ListBucketAnalyticsConfigurations operation. The "output" return
// value will be populated with the request's response once the request completes
// successfuly.
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
@ -3460,7 +3460,7 @@ const opListBucketInventoryConfigurations = "ListBucketInventoryConfigurations"
// ListBucketInventoryConfigurationsRequest generates a "aws/request.Request" representing the
// client's request for the ListBucketInventoryConfigurations operation. The "output" return
// value will be populated with the request's response once the request completes
// successfuly.
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
@ -3534,7 +3534,7 @@ const opListBucketMetricsConfigurations = "ListBucketMetricsConfigurations"
// ListBucketMetricsConfigurationsRequest generates a "aws/request.Request" representing the
// client's request for the ListBucketMetricsConfigurations operation. The "output" return
// value will be populated with the request's response once the request completes
// successfuly.
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
@ -3608,7 +3608,7 @@ const opListBuckets = "ListBuckets"
// ListBucketsRequest generates a "aws/request.Request" representing the
// client's request for the ListBuckets operation. The "output" return
// value will be populated with the request's response once the request completes
// successfuly.
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
@ -3682,7 +3682,7 @@ const opListMultipartUploads = "ListMultipartUploads"
// ListMultipartUploadsRequest generates a "aws/request.Request" representing the
// client's request for the ListMultipartUploads operation. The "output" return
// value will be populated with the request's response once the request completes
// successfuly.
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
@ -3812,7 +3812,7 @@ const opListObjectVersions = "ListObjectVersions"
// ListObjectVersionsRequest generates a "aws/request.Request" representing the
// client's request for the ListObjectVersions operation. The "output" return
// value will be populated with the request's response once the request completes
// successfuly.
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
@ -3942,7 +3942,7 @@ const opListObjects = "ListObjects"
// ListObjectsRequest generates a "aws/request.Request" representing the
// client's request for the ListObjects operation. The "output" return
// value will be populated with the request's response once the request completes
// successfuly.
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
@ -4079,7 +4079,7 @@ const opListObjectsV2 = "ListObjectsV2"
// ListObjectsV2Request generates a "aws/request.Request" representing the
// client's request for the ListObjectsV2 operation. The "output" return
// value will be populated with the request's response once the request completes
// successfuly.
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
@ -4217,7 +4217,7 @@ const opListParts = "ListParts"
// ListPartsRequest generates a "aws/request.Request" representing the
// client's request for the ListParts operation. The "output" return
// value will be populated with the request's response once the request completes
// successfuly.
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
@ -4347,7 +4347,7 @@ const opPutBucketAccelerateConfiguration = "PutBucketAccelerateConfiguration"
// PutBucketAccelerateConfigurationRequest generates a "aws/request.Request" representing the
// client's request for the PutBucketAccelerateConfiguration operation. The "output" return
// value will be populated with the request's response once the request completes
// successfuly.
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
@ -4423,7 +4423,7 @@ const opPutBucketAcl = "PutBucketAcl"
// PutBucketAclRequest generates a "aws/request.Request" representing the
// client's request for the PutBucketAcl operation. The "output" return
// value will be populated with the request's response once the request completes
// successfuly.
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
@ -4499,7 +4499,7 @@ const opPutBucketAnalyticsConfiguration = "PutBucketAnalyticsConfiguration"
// PutBucketAnalyticsConfigurationRequest generates a "aws/request.Request" representing the
// client's request for the PutBucketAnalyticsConfiguration operation. The "output" return
// value will be populated with the request's response once the request completes
// successfuly.
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
@ -4576,7 +4576,7 @@ const opPutBucketCors = "PutBucketCors"
// PutBucketCorsRequest generates a "aws/request.Request" representing the
// client's request for the PutBucketCors operation. The "output" return
// value will be populated with the request's response once the request completes
// successfuly.
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
@ -4652,7 +4652,7 @@ const opPutBucketEncryption = "PutBucketEncryption"
// PutBucketEncryptionRequest generates a "aws/request.Request" representing the
// client's request for the PutBucketEncryption operation. The "output" return
// value will be populated with the request's response once the request completes
// successfuly.
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
@ -4729,7 +4729,7 @@ const opPutBucketInventoryConfiguration = "PutBucketInventoryConfiguration"
// PutBucketInventoryConfigurationRequest generates a "aws/request.Request" representing the
// client's request for the PutBucketInventoryConfiguration operation. The "output" return
// value will be populated with the request's response once the request completes
// successfuly.
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
@ -4806,7 +4806,7 @@ const opPutBucketLifecycle = "PutBucketLifecycle"
// PutBucketLifecycleRequest generates a "aws/request.Request" representing the
// client's request for the PutBucketLifecycle operation. The "output" return
// value will be populated with the request's response once the request completes
// successfuly.
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
@ -4891,7 +4891,7 @@ const opPutBucketLifecycleConfiguration = "PutBucketLifecycleConfiguration"
// PutBucketLifecycleConfigurationRequest generates a "aws/request.Request" representing the
// client's request for the PutBucketLifecycleConfiguration operation. The "output" return
// value will be populated with the request's response once the request completes
// successfuly.
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
@ -4968,7 +4968,7 @@ const opPutBucketLogging = "PutBucketLogging"
// PutBucketLoggingRequest generates a "aws/request.Request" representing the
// client's request for the PutBucketLogging operation. The "output" return
// value will be populated with the request's response once the request completes
// successfuly.
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
@ -5046,7 +5046,7 @@ const opPutBucketMetricsConfiguration = "PutBucketMetricsConfiguration"
// PutBucketMetricsConfigurationRequest generates a "aws/request.Request" representing the
// client's request for the PutBucketMetricsConfiguration operation. The "output" return
// value will be populated with the request's response once the request completes
// successfuly.
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
@ -5123,7 +5123,7 @@ const opPutBucketNotification = "PutBucketNotification"
// PutBucketNotificationRequest generates a "aws/request.Request" representing the
// client's request for the PutBucketNotification operation. The "output" return
// value will be populated with the request's response once the request completes
// successfuly.
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
@ -5208,7 +5208,7 @@ const opPutBucketNotificationConfiguration = "PutBucketNotificationConfiguration
// PutBucketNotificationConfigurationRequest generates a "aws/request.Request" representing the
// client's request for the PutBucketNotificationConfiguration operation. The "output" return
// value will be populated with the request's response once the request completes
// successfuly.
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
@ -5284,7 +5284,7 @@ const opPutBucketPolicy = "PutBucketPolicy"
// PutBucketPolicyRequest generates a "aws/request.Request" representing the
// client's request for the PutBucketPolicy operation. The "output" return
// value will be populated with the request's response once the request completes
// successfuly.
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
@ -5361,7 +5361,7 @@ const opPutBucketReplication = "PutBucketReplication"
// PutBucketReplicationRequest generates a "aws/request.Request" representing the
// client's request for the PutBucketReplication operation. The "output" return
// value will be populated with the request's response once the request completes
// successfuly.
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
@ -5403,7 +5403,8 @@ func (c *S3) PutBucketReplicationRequest(input *PutBucketReplicationInput) (req
// PutBucketReplication API operation for Amazon Simple Storage Service.
//
// Creates a new replication configuration (or replaces an existing one, if
// present).
// present). For more information, see Cross-Region Replication (CRR) ( https://docs.aws.amazon.com/AmazonS3/latest/dev/crr.html)
// in the Amazon S3 Developer Guide.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
@ -5438,7 +5439,7 @@ const opPutBucketRequestPayment = "PutBucketRequestPayment"
// PutBucketRequestPaymentRequest generates a "aws/request.Request" representing the
// client's request for the PutBucketRequestPayment operation. The "output" return
// value will be populated with the request's response once the request completes
// successfuly.
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
@ -5518,7 +5519,7 @@ const opPutBucketTagging = "PutBucketTagging"
// PutBucketTaggingRequest generates a "aws/request.Request" representing the
// client's request for the PutBucketTagging operation. The "output" return
// value will be populated with the request's response once the request completes
// successfuly.
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
@ -5594,7 +5595,7 @@ const opPutBucketVersioning = "PutBucketVersioning"
// PutBucketVersioningRequest generates a "aws/request.Request" representing the
// client's request for the PutBucketVersioning operation. The "output" return
// value will be populated with the request's response once the request completes
// successfuly.
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
@ -5671,7 +5672,7 @@ const opPutBucketWebsite = "PutBucketWebsite"
// PutBucketWebsiteRequest generates a "aws/request.Request" representing the
// client's request for the PutBucketWebsite operation. The "output" return
// value will be populated with the request's response once the request completes
// successfuly.
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
@ -5747,7 +5748,7 @@ const opPutObject = "PutObject"
// PutObjectRequest generates a "aws/request.Request" representing the
// client's request for the PutObject operation. The "output" return
// value will be populated with the request's response once the request completes
// successfuly.
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
@ -5821,7 +5822,7 @@ const opPutObjectAcl = "PutObjectAcl"
// PutObjectAclRequest generates a "aws/request.Request" representing the
// client's request for the PutObjectAcl operation. The "output" return
// value will be populated with the request's response once the request completes
// successfuly.
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
@ -5901,7 +5902,7 @@ const opPutObjectTagging = "PutObjectTagging"
// PutObjectTaggingRequest generates a "aws/request.Request" representing the
// client's request for the PutObjectTagging operation. The "output" return
// value will be populated with the request's response once the request completes
// successfuly.
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
@ -5975,7 +5976,7 @@ const opRestoreObject = "RestoreObject"
// RestoreObjectRequest generates a "aws/request.Request" representing the
// client's request for the RestoreObject operation. The "output" return
// value will be populated with the request's response once the request completes
// successfuly.
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
@ -6054,7 +6055,7 @@ const opSelectObjectContent = "SelectObjectContent"
// SelectObjectContentRequest generates a "aws/request.Request" representing the
// client's request for the SelectObjectContent operation. The "output" return
// value will be populated with the request's response once the request completes
// successfuly.
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
@ -6136,7 +6137,7 @@ const opUploadPart = "UploadPart"
// UploadPartRequest generates a "aws/request.Request" representing the
// client's request for the UploadPart operation. The "output" return
// value will be populated with the request's response once the request completes
// successfuly.
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
@ -6216,7 +6217,7 @@ const opUploadPartCopy = "UploadPartCopy"
// UploadPartCopyRequest generates a "aws/request.Request" representing the
// client's request for the UploadPartCopy operation. The "output" return
// value will be populated with the request's response once the request completes
// successfuly.
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
@ -9280,6 +9281,14 @@ func (s DeleteBucketPolicyOutput) GoString() string {
type DeleteBucketReplicationInput struct {
_ struct{} `type:"structure"`
// Deletes the replication subresource associated with the specified bucket.
//
// There is usually some time lag before replication configuration deletion
// is fully propagated to all the Amazon S3 systems.
//
// For more information, see Cross-Region Replication (CRR) ( https://docs.aws.amazon.com/AmazonS3/latest/dev/crr.html)
// in the Amazon S3 Developer Guide.
//
// Bucket is a required field
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
}
@ -9507,6 +9516,33 @@ func (s *DeleteMarkerEntry) SetVersionId(v string) *DeleteMarkerEntry {
return s
}
// Specifies whether Amazon S3 should replicate delete markers.
type DeleteMarkerReplication struct {
_ struct{} `type:"structure"`
// The status of the delete marker replication.
//
// In the current implementation, Amazon S3 does not replicate the delete markers.
// Therefore, the status must be Disabled.
Status *string `type:"string" enum:"DeleteMarkerReplicationStatus"`
}
// String returns the string representation
func (s DeleteMarkerReplication) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s DeleteMarkerReplication) GoString() string {
return s.String()
}
// SetStatus sets the Status field's value.
func (s *DeleteMarkerReplication) SetStatus(v string) *DeleteMarkerReplication {
s.Status = &v
return s
}
type DeleteObjectInput struct {
_ struct{} `type:"structure"`
@ -9903,19 +9939,34 @@ type Destination struct {
_ struct{} `type:"structure"`
// Container for information regarding the access control for replicas.
//
// Use only in a cross-account scenario, where source and destination bucket
// owners are not the same, when you want to change replica ownership to the
// AWS account that owns the destination bucket. If you don't add this element
// to the replication configuration, the replicas are owned by the same AWS account
// that owns the source object.
AccessControlTranslation *AccessControlTranslation `type:"structure"`
// Account ID of the destination bucket. Currently this is only being verified
// if Access Control Translation is enabled
// Account ID of the destination bucket. Currently Amazon S3 verifies this value
// only if Access Control Translation is enabled.
//
// In a cross-account scenario, if you tell Amazon S3 to change replica ownership
// to the AWS account that owns the destination bucket by adding the AccessControlTranslation
// element, this is the account ID of the destination bucket owner.
Account *string `type:"string"`
// Amazon resource name (ARN) of the bucket where you want Amazon S3 to store
// replicas of the object identified by the rule.
//
// If you have multiple rules in your replication configuration, all rules must
// specify the same bucket as the destination. A replication configuration can
// replicate objects only to one destination bucket.
//
// Bucket is a required field
Bucket *string `type:"string" required:"true"`
// Container for information regarding encryption based configuration for replicas.
// Container that provides encryption-related information. You must specify
// this element if the SourceSelectionCriteria is specified.
EncryptionConfiguration *EncryptionConfiguration `type:"structure"`
// The class of storage used to store the object.
@ -10052,7 +10103,8 @@ func (s *Encryption) SetKMSKeyId(v string) *Encryption {
type EncryptionConfiguration struct {
_ struct{} `type:"structure"`
// The id of the KMS key used to encrypt the replica object.
// The ID of the AWS KMS key for the region where the destination bucket resides.
// Amazon S3 uses this key to encrypt the replica object.
ReplicaKmsKeyID *string `type:"string"`
}
@ -19081,8 +19133,8 @@ type ReplicationConfiguration struct {
// Role is a required field
Role *string `type:"string" required:"true"`
// Container for information about a particular replication rule. Replication
// configuration must have at least one rule and can contain up to 1,000 rules.
// Container for one or more replication rules. Replication configuration must
// have at least one rule and can contain up to 1,000 rules.
//
// Rules is a required field
Rules []*ReplicationRule `locationName:"Rule" type:"list" flattened:"true" required:"true"`
@ -19140,22 +19192,50 @@ func (s *ReplicationConfiguration) SetRules(v []*ReplicationRule) *ReplicationCo
type ReplicationRule struct {
_ struct{} `type:"structure"`
// Specifies whether Amazon S3 should replicate delete markers.
DeleteMarkerReplication *DeleteMarkerReplication `type:"structure"`
// Container for replication destination information.
//
// Destination is a required field
Destination *Destination `type:"structure" required:"true"`
// Filter that identifies subset of objects to which the replication rule applies.
// A Filter must specify exactly one Prefix, Tag, or an And child element.
Filter *ReplicationRuleFilter `type:"structure"`
// Unique identifier for the rule. The value cannot be longer than 255 characters.
ID *string `type:"string"`
// Object keyname prefix identifying one or more objects to which the rule applies.
// Maximum prefix length can be up to 1,024 characters. Overlapping prefixes
// are not supported.
// Maximum prefix length can be up to 1,024 characters.
//
// Prefix is a required field
Prefix *string `type:"string" required:"true"`
// Deprecated: Prefix has been deprecated
Prefix *string `deprecated:"true" type:"string"`
// Container for filters that define which source objects should be replicated.
// The priority associated with the rule. If you specify multiple rules in a
// replication configuration, then Amazon S3 applies rule priority in the event
// there are conflicts (two or more rules identify the same object based on
// filter specified). The rule with higher priority takes precedence. For example,
//
// * The same object qualifies under prefix-based filter criteria because the
//    prefixes you specified in multiple rules overlap.
//
// * The same object qualifies under tag-based filter criteria specified in
//    multiple rules.
//
// For more information, see Cross-Region Replication (CRR) ( https://docs.aws.amazon.com/AmazonS3/latest/dev/crr.html)
// in the Amazon S3 Developer Guide.
Priority *int64 `type:"integer"`
// Container that describes additional filters in identifying source objects
// that you want to replicate. Currently, Amazon S3 supports only the filter
// that you can specify for objects created with server-side encryption using
// an AWS KMS-managed key. You can choose to enable or disable replication of
// these objects.
//
// Specify this element if you want Amazon S3 to replicate objects created with
// server-side encryption using AWS KMS-managed keys.
SourceSelectionCriteria *SourceSelectionCriteria `type:"structure"`
// The rule is ignored if status is not Enabled.
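Since Priority only matters when filters overlap, a rough sketch of two rules competing for the same prefix may help; the rule IDs, prefixes and destination ARN are placeholders, not values from this change:

dest := &s3.Destination{Bucket: aws.String("arn:aws:s3:::example-destination-bucket")} // placeholder

rules := []*s3.ReplicationRule{
	{
		ID:       aws.String("logs-2018"), // placeholder rule IDs
		Status:   aws.String(s3.ReplicationRuleStatusEnabled),
		Priority: aws.Int64(2), // wins when both rules match the same object
		Filter:   &s3.ReplicationRuleFilter{Prefix: aws.String("logs/2018/")},
		DeleteMarkerReplication: &s3.DeleteMarkerReplication{
			Status: aws.String(s3.DeleteMarkerReplicationStatusDisabled),
		},
		Destination: dest,
	},
	{
		ID:       aws.String("logs-all"),
		Status:   aws.String(s3.ReplicationRuleStatusEnabled),
		Priority: aws.Int64(1),
		Filter:   &s3.ReplicationRuleFilter{Prefix: aws.String("logs/")},
		DeleteMarkerReplication: &s3.DeleteMarkerReplication{
			Status: aws.String(s3.DeleteMarkerReplicationStatusDisabled),
		},
		Destination: dest,
	},
}
_ = rules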
@ -19180,9 +19260,6 @@ func (s *ReplicationRule) Validate() error {
if s.Destination == nil {
invalidParams.Add(request.NewErrParamRequired("Destination"))
}
if s.Prefix == nil {
invalidParams.Add(request.NewErrParamRequired("Prefix"))
}
if s.Status == nil {
invalidParams.Add(request.NewErrParamRequired("Status"))
}
@ -19191,6 +19268,11 @@ func (s *ReplicationRule) Validate() error {
invalidParams.AddNested("Destination", err.(request.ErrInvalidParams))
}
}
if s.Filter != nil {
if err := s.Filter.Validate(); err != nil {
invalidParams.AddNested("Filter", err.(request.ErrInvalidParams))
}
}
if s.SourceSelectionCriteria != nil {
if err := s.SourceSelectionCriteria.Validate(); err != nil {
invalidParams.AddNested("SourceSelectionCriteria", err.(request.ErrInvalidParams))
@ -19203,12 +19285,24 @@ func (s *ReplicationRule) Validate() error {
return nil
}
// SetDeleteMarkerReplication sets the DeleteMarkerReplication field's value.
func (s *ReplicationRule) SetDeleteMarkerReplication(v *DeleteMarkerReplication) *ReplicationRule {
s.DeleteMarkerReplication = v
return s
}
// SetDestination sets the Destination field's value.
func (s *ReplicationRule) SetDestination(v *Destination) *ReplicationRule {
s.Destination = v
return s
}
// SetFilter sets the Filter field's value.
func (s *ReplicationRule) SetFilter(v *ReplicationRuleFilter) *ReplicationRule {
s.Filter = v
return s
}
// SetID sets the ID field's value.
func (s *ReplicationRule) SetID(v string) *ReplicationRule {
s.ID = &v
@ -19221,6 +19315,12 @@ func (s *ReplicationRule) SetPrefix(v string) *ReplicationRule {
return s
}
// SetPriority sets the Priority field's value.
func (s *ReplicationRule) SetPriority(v int64) *ReplicationRule {
s.Priority = &v
return s
}
// SetSourceSelectionCriteria sets the SourceSelectionCriteria field's value.
func (s *ReplicationRule) SetSourceSelectionCriteria(v *SourceSelectionCriteria) *ReplicationRule {
s.SourceSelectionCriteria = v
@ -19233,6 +19333,130 @@ func (s *ReplicationRule) SetStatus(v string) *ReplicationRule {
return s
}
type ReplicationRuleAndOperator struct {
_ struct{} `type:"structure"`
Prefix *string `type:"string"`
Tags []*Tag `locationName:"Tag" locationNameList:"Tag" type:"list" flattened:"true"`
}
// String returns the string representation
func (s ReplicationRuleAndOperator) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s ReplicationRuleAndOperator) GoString() string {
return s.String()
}
// Validate inspects the fields of the type to determine if they are valid.
func (s *ReplicationRuleAndOperator) Validate() error {
invalidParams := request.ErrInvalidParams{Context: "ReplicationRuleAndOperator"}
if s.Tags != nil {
for i, v := range s.Tags {
if v == nil {
continue
}
if err := v.Validate(); err != nil {
invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams))
}
}
}
if invalidParams.Len() > 0 {
return invalidParams
}
return nil
}
// SetPrefix sets the Prefix field's value.
func (s *ReplicationRuleAndOperator) SetPrefix(v string) *ReplicationRuleAndOperator {
s.Prefix = &v
return s
}
// SetTags sets the Tags field's value.
func (s *ReplicationRuleAndOperator) SetTags(v []*Tag) *ReplicationRuleAndOperator {
s.Tags = v
return s
}
// Filter that identifies subset of objects to which the replication rule applies.
// A Filter must specify exactly one Prefix, Tag, or an And child element.
type ReplicationRuleFilter struct {
_ struct{} `type:"structure"`
// Container for specifying rule filters. These filters determine the subset
// of objects to which the rule applies. The element is required only if you
// specify more than one filter. For example:
//
// * You specify both a Prefix and a Tag filter. Then you wrap these in
//    an And tag.
//
// * You specify a filter based on multiple tags. Then you wrap the Tag
//    elements in an And tag.
And *ReplicationRuleAndOperator `type:"structure"`
// Object keyname prefix that identifies subset of objects to which the rule
// applies.
Prefix *string `type:"string"`
// Container for specifying a tag key and value.
//
// The rule applies only to objects that have the tag in their tag set.
Tag *Tag `type:"structure"`
}
// String returns the string representation
func (s ReplicationRuleFilter) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s ReplicationRuleFilter) GoString() string {
return s.String()
}
// Validate inspects the fields of the type to determine if they are valid.
func (s *ReplicationRuleFilter) Validate() error {
invalidParams := request.ErrInvalidParams{Context: "ReplicationRuleFilter"}
if s.And != nil {
if err := s.And.Validate(); err != nil {
invalidParams.AddNested("And", err.(request.ErrInvalidParams))
}
}
if s.Tag != nil {
if err := s.Tag.Validate(); err != nil {
invalidParams.AddNested("Tag", err.(request.ErrInvalidParams))
}
}
if invalidParams.Len() > 0 {
return invalidParams
}
return nil
}
// SetAnd sets the And field's value.
func (s *ReplicationRuleFilter) SetAnd(v *ReplicationRuleAndOperator) *ReplicationRuleFilter {
s.And = v
return s
}
// SetPrefix sets the Prefix field's value.
func (s *ReplicationRuleFilter) SetPrefix(v string) *ReplicationRuleFilter {
s.Prefix = &v
return s
}
// SetTag sets the Tag field's value.
func (s *ReplicationRuleFilter) SetTag(v *Tag) *ReplicationRuleFilter {
s.Tag = v
return s
}
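Where a rule needs both a prefix and tags, they are wrapped in the And element, roughly like this sketch (the keys and values are made up):

filter := &s3.ReplicationRuleFilter{
	And: &s3.ReplicationRuleAndOperator{
		Prefix: aws.String("images/"),
		Tags: []*s3.Tag{
			{Key: aws.String("replicate"), Value: aws.String("true")},
			{Key: aws.String("team"), Value: aws.String("media")},
		},
	},
}
_ = filter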
type RequestPaymentConfiguration struct {
_ struct{} `type:"structure"`
@ -20411,6 +20635,8 @@ type SourceSelectionCriteria struct {
_ struct{} `type:"structure"`
// Container for filter information of selection of KMS Encrypted S3 objects.
// The element is required if you include SourceSelectionCriteria in the replication
// configuration.
SseKmsEncryptedObjects *SseKmsEncryptedObjects `type:"structure"`
}
@ -21760,6 +21986,14 @@ const (
CompressionTypeBzip2 = "BZIP2"
)
const (
// DeleteMarkerReplicationStatusEnabled is a DeleteMarkerReplicationStatus enum value
DeleteMarkerReplicationStatusEnabled = "Enabled"
// DeleteMarkerReplicationStatusDisabled is a DeleteMarkerReplicationStatus enum value
DeleteMarkerReplicationStatusDisabled = "Disabled"
)
// Requests Amazon S3 to encode the object keys in the response and specifies
// the encoding method to use. An object key may contain any Unicode character;
// however, XML 1.0 parser cannot parse some characters, such as characters


@ -3,6 +3,7 @@ package s3
import (
"github.com/aws/aws-sdk-go/aws/client"
"github.com/aws/aws-sdk-go/aws/request"
"github.com/aws/aws-sdk-go/internal/s3err"
)
func init() {
@ -21,6 +22,7 @@ func defaultInitClientFn(c *client.Client) {
// S3 uses custom error unmarshaling logic
c.Handlers.UnmarshalError.Clear()
c.Handlers.UnmarshalError.PushBack(unmarshalError)
c.Handlers.UnmarshalError.PushBackNamed(s3err.RequestFailureWrapperHandler())
}
func defaultInitRequestFn(r *request.Request) {
@ -42,6 +44,7 @@ func defaultInitRequestFn(r *request.Request) {
r.Handlers.Validate.PushFront(populateLocationConstraint)
case opCopyObject, opUploadPartCopy, opCompleteMultipartUpload:
r.Handlers.Unmarshal.PushFront(copyMultipartStatusOKUnmarhsalError)
r.Handlers.Unmarshal.PushBackNamed(s3err.RequestFailureWrapperHandler())
case opPutObject, opUploadPart:
r.Handlers.Build.PushBack(computeBodyHashes)
// Disabled until #1837 root issue is resolved.


@ -67,7 +67,9 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio
}
// Handlers
svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler)
svc.Handlers.Sign.PushBackNamed(v4.BuildNamedHandler(v4.SignRequestHandler.Name, func(s *v4.Signer) {
s.DisableURIPathEscaping = true
}))
svc.Handlers.Build.PushBackNamed(restxml.BuildHandler)
svc.Handlers.Unmarshal.PushBackNamed(restxml.UnmarshalHandler)
svc.Handlers.UnmarshalMeta.PushBackNamed(restxml.UnmarshalMetaHandler)


@ -13,7 +13,11 @@ import (
func copyMultipartStatusOKUnmarhsalError(r *request.Request) {
b, err := ioutil.ReadAll(r.HTTPResponse.Body)
if err != nil {
r.Error = awserr.New("SerializationError", "unable to read response body", err)
r.Error = awserr.NewRequestFailure(
awserr.New("SerializationError", "unable to read response body", err),
r.HTTPResponse.StatusCode,
r.RequestID,
)
return
}
body := bytes.NewReader(b)

View file

@ -23,22 +23,17 @@ func unmarshalError(r *request.Request) {
defer r.HTTPResponse.Body.Close()
defer io.Copy(ioutil.Discard, r.HTTPResponse.Body)
hostID := r.HTTPResponse.Header.Get("X-Amz-Id-2")
// Bucket exists in a different region, and request needs
// to be made to the correct region.
if r.HTTPResponse.StatusCode == http.StatusMovedPermanently {
r.Error = requestFailure{
RequestFailure: awserr.NewRequestFailure(
awserr.New("BucketRegionError",
fmt.Sprintf("incorrect region, the bucket is not in '%s' region",
aws.StringValue(r.Config.Region)),
nil),
r.HTTPResponse.StatusCode,
r.RequestID,
),
hostID: hostID,
}
r.Error = awserr.NewRequestFailure(
awserr.New("BucketRegionError",
fmt.Sprintf("incorrect region, the bucket is not in '%s' region",
aws.StringValue(r.Config.Region)),
nil),
r.HTTPResponse.StatusCode,
r.RequestID,
)
return
}
@ -63,14 +58,11 @@ func unmarshalError(r *request.Request) {
errMsg = statusText
}
r.Error = requestFailure{
RequestFailure: awserr.NewRequestFailure(
awserr.New(errCode, errMsg, err),
r.HTTPResponse.StatusCode,
r.RequestID,
),
hostID: hostID,
}
r.Error = awserr.NewRequestFailure(
awserr.New(errCode, errMsg, err),
r.HTTPResponse.StatusCode,
r.RequestID,
)
}
// A RequestFailure provides access to the S3 Request ID and Host ID values
@ -83,21 +75,3 @@ type RequestFailure interface {
// Host ID is the S3 Host ID needed for debug, and contacting support
HostID() string
}
type requestFailure struct {
awserr.RequestFailure
hostID string
}
func (r requestFailure) Error() string {
extra := fmt.Sprintf("status code: %d, request id: %s, host id: %s",
r.StatusCode(), r.RequestID(), r.hostID)
return awserr.SprintError(r.Code(), r.Message(), extra, r.OrigErr())
}
func (r requestFailure) String() string {
return r.Error()
}
func (r requestFailure) HostID() string {
return r.hostID
}
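Code that previously reached into the package-local requestFailure type for the host ID can keep using the exported interface; a hedged sketch, assuming err was returned by an S3 call and the standard log package is imported:

if reqErr, ok := err.(s3.RequestFailure); ok {
	// Request ID and Host ID are the values AWS support asks for when debugging.
	log.Printf("s3 request failed: code=%s request-id=%s host-id=%s",
		reqErr.Code(), reqErr.RequestID(), reqErr.HostID())
}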


@ -15,7 +15,7 @@ const opAssumeRole = "AssumeRole"
// AssumeRoleRequest generates a "aws/request.Request" representing the
// client's request for the AssumeRole operation. The "output" return
// value will be populated with the request's response once the request completes
// successfuly.
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
@ -209,7 +209,7 @@ const opAssumeRoleWithSAML = "AssumeRoleWithSAML"
// AssumeRoleWithSAMLRequest generates a "aws/request.Request" representing the
// client's request for the AssumeRoleWithSAML operation. The "output" return
// value will be populated with the request's response once the request completes
// successfuly.
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
@ -391,7 +391,7 @@ const opAssumeRoleWithWebIdentity = "AssumeRoleWithWebIdentity"
// AssumeRoleWithWebIdentityRequest generates a "aws/request.Request" representing the
// client's request for the AssumeRoleWithWebIdentity operation. The "output" return
// value will be populated with the request's response once the request completes
// successfuly.
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
@ -602,7 +602,7 @@ const opDecodeAuthorizationMessage = "DecodeAuthorizationMessage"
// DecodeAuthorizationMessageRequest generates a "aws/request.Request" representing the
// client's request for the DecodeAuthorizationMessage operation. The "output" return
// value will be populated with the request's response once the request completes
// successfuly.
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
@ -714,7 +714,7 @@ const opGetCallerIdentity = "GetCallerIdentity"
// GetCallerIdentityRequest generates a "aws/request.Request" representing the
// client's request for the GetCallerIdentity operation. The "output" return
// value will be populated with the request's response once the request completes
// successfuly.
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
@ -789,7 +789,7 @@ const opGetFederationToken = "GetFederationToken"
// GetFederationTokenRequest generates a "aws/request.Request" representing the
// client's request for the GetFederationToken operation. The "output" return
// value will be populated with the request's response once the request completes
// successfuly.
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
@ -958,7 +958,7 @@ const opGetSessionToken = "GetSessionToken"
// GetSessionTokenRequest generates a "aws/request.Request" representing the
// client's request for the GetSessionToken operation. The "output" return
// value will be populated with the request's response once the request completes
// successfuly.
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.


@ -227,7 +227,8 @@ func (f *File) Append(source interface{}, others ...interface{}) error {
}
func (f *File) writeToBuffer(indent string) (*bytes.Buffer, error) {
equalSign := "="
equalSign := DefaultFormatLeft + "=" + DefaultFormatRight
if PrettyFormat || PrettyEqual {
equalSign = " = "
}


@ -34,7 +34,7 @@ const (
// Maximum allowed depth when recursively substituing variable names.
_DEPTH_VALUES = 99
_VERSION = "1.38.2"
_VERSION = "1.38.3"
)
// Version returns current package version literal.
@ -48,6 +48,10 @@ var (
// at package init time.
LineBreak = "\n"
// Place custom spaces when PrettyFormat and PrettyEqual are both disabled
DefaultFormatLeft = ""
DefaultFormatRight = ""
// Variable regexp pattern: %(variable)s
varPattern = regexp.MustCompile(`%\(([^\)]+)\)s`)
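A small sketch of the new knobs: with PrettyFormat and PrettyEqual disabled, DefaultFormatLeft and DefaultFormatRight control what surrounds the "=" (the section and key names are made up, and the go-ini import is assumed):

ini.PrettyFormat = false
ini.DefaultFormatLeft = " "  // one space before "="
ini.DefaultFormatRight = " " // one space after "="

cfg := ini.Empty()
cfg.Section("server").Key("port").SetValue("8080")
cfg.WriteTo(os.Stdout) // roughly: [server] followed by: port = 8080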


@ -0,0 +1,9 @@
(The MIT License)
Copyright (c) 2017 marvin + konsorten GmbH (open-source@konsorten.de)
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the 'Software'), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.


@ -0,0 +1,36 @@
// +build windows
package sequences
import (
"syscall"
"unsafe"
)
var (
kernel32Dll *syscall.LazyDLL = syscall.NewLazyDLL("Kernel32.dll")
setConsoleMode *syscall.LazyProc = kernel32Dll.NewProc("SetConsoleMode")
)
func EnableVirtualTerminalProcessing(stream syscall.Handle, enable bool) error {
const ENABLE_VIRTUAL_TERMINAL_PROCESSING uint32 = 0x4
var mode uint32
err := syscall.GetConsoleMode(syscall.Stdout, &mode)
if err != nil {
return err
}
if enable {
mode |= ENABLE_VIRTUAL_TERMINAL_PROCESSING
} else {
mode &^= ENABLE_VIRTUAL_TERMINAL_PROCESSING
}
ret, _, err := setConsoleMode.Call(uintptr(unsafe.Pointer(stream)), uintptr(mode))
if ret == 0 {
return err
}
return nil
}
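A minimal sketch of how a caller would enable ANSI processing for the current console on Windows (this mirrors how logrus uses the package further down); the os, log and syscall imports are assumed:

handle := syscall.Handle(os.Stdout.Fd())
if err := sequences.EnableVirtualTerminalProcessing(handle, true); err != nil {
	log.Printf("could not enable VT processing: %v", err)
}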


@ -4,6 +4,7 @@ import (
"bytes"
"fmt"
"os"
"reflect"
"sync"
"time"
)
@ -41,8 +42,11 @@ type Entry struct {
// Message passed to Debug, Info, Warn, Error, Fatal or Panic
Message string
// When formatter is called in entry.log(), an Buffer may be set to entry
// When formatter is called in entry.log(), a Buffer may be set to entry
Buffer *bytes.Buffer
// err may contain a field formatting error
err string
}
func NewEntry(logger *Logger) *Entry {
@ -80,10 +84,18 @@ func (entry *Entry) WithFields(fields Fields) *Entry {
for k, v := range entry.Data {
data[k] = v
}
var field_err string
for k, v := range fields {
data[k] = v
if t := reflect.TypeOf(v); t != nil && t.Kind() == reflect.Func {
field_err = fmt.Sprintf("can not add field %q", k)
if entry.err != "" {
field_err = entry.err + ", " + field_err
}
} else {
data[k] = v
}
}
return &Entry{Logger: entry.Logger, Data: data, Time: entry.Time}
return &Entry{Logger: entry.Logger, Data: data, Time: entry.Time, err: field_err}
}
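Function values cannot be serialized by the formatters, so they are now dropped and surfaced through the entry's err field instead; a sketch of what a caller would see (the field names are made up):

logger := logrus.New()
logger.WithFields(logrus.Fields{
	"user":     "jane",               // kept as a normal field
	"callback": func() { /* ... */ }, // dropped, reported via logrus_error
}).Info("user logged in")
// output ends with, roughly: logrus_error="can not add field \"callback\""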
// Overrides the time of the Entry.
@ -137,9 +149,9 @@ func (entry *Entry) fireHooks() {
}
func (entry *Entry) write() {
serialized, err := entry.Logger.Formatter.Format(entry)
entry.Logger.mu.Lock()
defer entry.Logger.mu.Unlock()
serialized, err := entry.Logger.Formatter.Format(entry)
if err != nil {
fmt.Fprintf(os.Stderr, "Failed to obtain reader, %v\n", err)
} else {
@ -151,7 +163,7 @@ func (entry *Entry) write() {
}
func (entry *Entry) Debug(args ...interface{}) {
if entry.Logger.level() >= DebugLevel {
if entry.Logger.IsLevelEnabled(DebugLevel) {
entry.log(DebugLevel, fmt.Sprint(args...))
}
}
@ -161,13 +173,13 @@ func (entry *Entry) Print(args ...interface{}) {
}
func (entry *Entry) Info(args ...interface{}) {
if entry.Logger.level() >= InfoLevel {
if entry.Logger.IsLevelEnabled(InfoLevel) {
entry.log(InfoLevel, fmt.Sprint(args...))
}
}
func (entry *Entry) Warn(args ...interface{}) {
if entry.Logger.level() >= WarnLevel {
if entry.Logger.IsLevelEnabled(WarnLevel) {
entry.log(WarnLevel, fmt.Sprint(args...))
}
}
@ -177,20 +189,20 @@ func (entry *Entry) Warning(args ...interface{}) {
}
func (entry *Entry) Error(args ...interface{}) {
if entry.Logger.level() >= ErrorLevel {
if entry.Logger.IsLevelEnabled(ErrorLevel) {
entry.log(ErrorLevel, fmt.Sprint(args...))
}
}
func (entry *Entry) Fatal(args ...interface{}) {
if entry.Logger.level() >= FatalLevel {
if entry.Logger.IsLevelEnabled(FatalLevel) {
entry.log(FatalLevel, fmt.Sprint(args...))
}
Exit(1)
}
func (entry *Entry) Panic(args ...interface{}) {
if entry.Logger.level() >= PanicLevel {
if entry.Logger.IsLevelEnabled(PanicLevel) {
entry.log(PanicLevel, fmt.Sprint(args...))
}
panic(fmt.Sprint(args...))
@ -199,13 +211,13 @@ func (entry *Entry) Panic(args ...interface{}) {
// Entry Printf family functions
func (entry *Entry) Debugf(format string, args ...interface{}) {
if entry.Logger.level() >= DebugLevel {
if entry.Logger.IsLevelEnabled(DebugLevel) {
entry.Debug(fmt.Sprintf(format, args...))
}
}
func (entry *Entry) Infof(format string, args ...interface{}) {
if entry.Logger.level() >= InfoLevel {
if entry.Logger.IsLevelEnabled(InfoLevel) {
entry.Info(fmt.Sprintf(format, args...))
}
}
@ -215,7 +227,7 @@ func (entry *Entry) Printf(format string, args ...interface{}) {
}
func (entry *Entry) Warnf(format string, args ...interface{}) {
if entry.Logger.level() >= WarnLevel {
if entry.Logger.IsLevelEnabled(WarnLevel) {
entry.Warn(fmt.Sprintf(format, args...))
}
}
@ -225,20 +237,20 @@ func (entry *Entry) Warningf(format string, args ...interface{}) {
}
func (entry *Entry) Errorf(format string, args ...interface{}) {
if entry.Logger.level() >= ErrorLevel {
if entry.Logger.IsLevelEnabled(ErrorLevel) {
entry.Error(fmt.Sprintf(format, args...))
}
}
func (entry *Entry) Fatalf(format string, args ...interface{}) {
if entry.Logger.level() >= FatalLevel {
if entry.Logger.IsLevelEnabled(FatalLevel) {
entry.Fatal(fmt.Sprintf(format, args...))
}
Exit(1)
}
func (entry *Entry) Panicf(format string, args ...interface{}) {
if entry.Logger.level() >= PanicLevel {
if entry.Logger.IsLevelEnabled(PanicLevel) {
entry.Panic(fmt.Sprintf(format, args...))
}
}
@ -246,13 +258,13 @@ func (entry *Entry) Panicf(format string, args ...interface{}) {
// Entry Println family functions
func (entry *Entry) Debugln(args ...interface{}) {
if entry.Logger.level() >= DebugLevel {
if entry.Logger.IsLevelEnabled(DebugLevel) {
entry.Debug(entry.sprintlnn(args...))
}
}
func (entry *Entry) Infoln(args ...interface{}) {
if entry.Logger.level() >= InfoLevel {
if entry.Logger.IsLevelEnabled(InfoLevel) {
entry.Info(entry.sprintlnn(args...))
}
}
@ -262,7 +274,7 @@ func (entry *Entry) Println(args ...interface{}) {
}
func (entry *Entry) Warnln(args ...interface{}) {
if entry.Logger.level() >= WarnLevel {
if entry.Logger.IsLevelEnabled(WarnLevel) {
entry.Warn(entry.sprintlnn(args...))
}
}
@ -272,20 +284,20 @@ func (entry *Entry) Warningln(args ...interface{}) {
}
func (entry *Entry) Errorln(args ...interface{}) {
if entry.Logger.level() >= ErrorLevel {
if entry.Logger.IsLevelEnabled(ErrorLevel) {
entry.Error(entry.sprintlnn(args...))
}
}
func (entry *Entry) Fatalln(args ...interface{}) {
if entry.Logger.level() >= FatalLevel {
if entry.Logger.IsLevelEnabled(FatalLevel) {
entry.Fatal(entry.sprintlnn(args...))
}
Exit(1)
}
func (entry *Entry) Panicln(args ...interface{}) {
if entry.Logger.level() >= PanicLevel {
if entry.Logger.IsLevelEnabled(PanicLevel) {
entry.Panic(entry.sprintlnn(args...))
}
}


@ -21,30 +21,27 @@ func SetOutput(out io.Writer) {
// SetFormatter sets the standard logger formatter.
func SetFormatter(formatter Formatter) {
std.mu.Lock()
defer std.mu.Unlock()
std.Formatter = formatter
std.SetFormatter(formatter)
}
// SetLevel sets the standard logger level.
func SetLevel(level Level) {
std.mu.Lock()
defer std.mu.Unlock()
std.SetLevel(level)
}
// GetLevel returns the standard logger level.
func GetLevel() Level {
std.mu.Lock()
defer std.mu.Unlock()
return std.level()
return std.GetLevel()
}
// IsLevelEnabled checks if the log level of the standard logger is greater than the level param
func IsLevelEnabled(level Level) bool {
return std.IsLevelEnabled(level)
}
// AddHook adds a hook to the standard logger hooks.
func AddHook(hook Hook) {
std.mu.Lock()
defer std.mu.Unlock()
std.Hooks.Add(hook)
std.AddHook(hook)
}
// WithError creates an entry from the standard logger and adds an error to it, using the value defined in ErrorKey as key.


@ -2,7 +2,14 @@ package logrus
import "time"
const defaultTimestampFormat = time.RFC3339
// Default key names for the default fields
const (
defaultTimestampFormat = time.RFC3339
FieldKeyMsg = "msg"
FieldKeyLevel = "level"
FieldKeyTime = "time"
FieldKeyLogrusError = "logrus_error"
)
// The Formatter interface is used to implement a custom Formatter. It takes an
// `Entry`. It exposes all the fields, including the default ones:
@ -48,4 +55,10 @@ func prefixFieldClashes(data Fields, fieldMap FieldMap) {
data["fields."+levelKey] = l
delete(data, levelKey)
}
logrusErrKey := fieldMap.resolve(FieldKeyLogrusError)
if l, ok := data[logrusErrKey]; ok {
data["fields."+logrusErrKey] = l
delete(data, logrusErrKey)
}
}


@ -1,6 +1,7 @@
package logrus
import (
"bytes"
"encoding/json"
"fmt"
)
@ -10,13 +11,6 @@ type fieldKey string
// FieldMap allows customization of the key names for default fields.
type FieldMap map[fieldKey]string
// Default key names for the default fields
const (
FieldKeyMsg = "msg"
FieldKeyLevel = "level"
FieldKeyTime = "time"
)
func (f FieldMap) resolve(key fieldKey) string {
if k, ok := f[key]; ok {
return k
@ -46,6 +40,9 @@ type JSONFormatter struct {
// },
// }
FieldMap FieldMap
// PrettyPrint will indent all json logs
PrettyPrint bool
}
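PrettyPrint only switches the encoder to indented output; a minimal sketch:

logrus.SetFormatter(&logrus.JSONFormatter{PrettyPrint: true})
logrus.WithField("component", "share").Info("started")
// emits an indented object instead of a single line, roughly:
// {
//   "component": "share",
//   "level": "info",
//   "msg": "started",
//   "time": "2018-10-11T20:17:46+02:00"
// }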
// Format renders a single log entry
@ -75,15 +72,29 @@ func (f *JSONFormatter) Format(entry *Entry) ([]byte, error) {
timestampFormat = defaultTimestampFormat
}
if entry.err != "" {
data[f.FieldMap.resolve(FieldKeyLogrusError)] = entry.err
}
if !f.DisableTimestamp {
data[f.FieldMap.resolve(FieldKeyTime)] = entry.Time.Format(timestampFormat)
}
data[f.FieldMap.resolve(FieldKeyMsg)] = entry.Message
data[f.FieldMap.resolve(FieldKeyLevel)] = entry.Level.String()
serialized, err := json.Marshal(data)
if err != nil {
var b *bytes.Buffer
if entry.Buffer != nil {
b = entry.Buffer
} else {
b = &bytes.Buffer{}
}
encoder := json.NewEncoder(b)
if f.PrettyPrint {
encoder.SetIndent("", " ")
}
if err := encoder.Encode(data); err != nil {
return nil, fmt.Errorf("Failed to marshal fields to JSON, %v", err)
}
return append(serialized, '\n'), nil
return b.Bytes(), nil
}


@ -11,7 +11,7 @@ import (
type Logger struct {
// The logs are `io.Copy`'d to this in a mutex. It's common to set this to a
// file, or leave it default which is `os.Stderr`. You can also set this to
// something more adventorous, such as logging to Kafka.
// something more adventurous, such as logging to Kafka.
Out io.Writer
// Hooks for the logger instance. These allow firing events based on logging
// levels and log entries. For example, to send errors to an error tracking
@ -85,6 +85,7 @@ func (logger *Logger) newEntry() *Entry {
}
func (logger *Logger) releaseEntry(entry *Entry) {
entry.Data = map[string]interface{}{}
logger.entryPool.Put(entry)
}
@ -121,7 +122,7 @@ func (logger *Logger) WithTime(t time.Time) *Entry {
}
func (logger *Logger) Debugf(format string, args ...interface{}) {
if logger.level() >= DebugLevel {
if logger.IsLevelEnabled(DebugLevel) {
entry := logger.newEntry()
entry.Debugf(format, args...)
logger.releaseEntry(entry)
@ -129,7 +130,7 @@ func (logger *Logger) Debugf(format string, args ...interface{}) {
}
func (logger *Logger) Infof(format string, args ...interface{}) {
if logger.level() >= InfoLevel {
if logger.IsLevelEnabled(InfoLevel) {
entry := logger.newEntry()
entry.Infof(format, args...)
logger.releaseEntry(entry)
@ -143,7 +144,7 @@ func (logger *Logger) Printf(format string, args ...interface{}) {
}
func (logger *Logger) Warnf(format string, args ...interface{}) {
if logger.level() >= WarnLevel {
if logger.IsLevelEnabled(WarnLevel) {
entry := logger.newEntry()
entry.Warnf(format, args...)
logger.releaseEntry(entry)
@ -151,7 +152,7 @@ func (logger *Logger) Warnf(format string, args ...interface{}) {
}
func (logger *Logger) Warningf(format string, args ...interface{}) {
if logger.level() >= WarnLevel {
if logger.IsLevelEnabled(WarnLevel) {
entry := logger.newEntry()
entry.Warnf(format, args...)
logger.releaseEntry(entry)
@ -159,7 +160,7 @@ func (logger *Logger) Warningf(format string, args ...interface{}) {
}
func (logger *Logger) Errorf(format string, args ...interface{}) {
if logger.level() >= ErrorLevel {
if logger.IsLevelEnabled(ErrorLevel) {
entry := logger.newEntry()
entry.Errorf(format, args...)
logger.releaseEntry(entry)
@ -167,7 +168,7 @@ func (logger *Logger) Errorf(format string, args ...interface{}) {
}
func (logger *Logger) Fatalf(format string, args ...interface{}) {
if logger.level() >= FatalLevel {
if logger.IsLevelEnabled(FatalLevel) {
entry := logger.newEntry()
entry.Fatalf(format, args...)
logger.releaseEntry(entry)
@ -176,7 +177,7 @@ func (logger *Logger) Fatalf(format string, args ...interface{}) {
}
func (logger *Logger) Panicf(format string, args ...interface{}) {
if logger.level() >= PanicLevel {
if logger.IsLevelEnabled(PanicLevel) {
entry := logger.newEntry()
entry.Panicf(format, args...)
logger.releaseEntry(entry)
@ -184,7 +185,7 @@ func (logger *Logger) Panicf(format string, args ...interface{}) {
}
func (logger *Logger) Debug(args ...interface{}) {
if logger.level() >= DebugLevel {
if logger.IsLevelEnabled(DebugLevel) {
entry := logger.newEntry()
entry.Debug(args...)
logger.releaseEntry(entry)
@ -192,7 +193,7 @@ func (logger *Logger) Debug(args ...interface{}) {
}
func (logger *Logger) Info(args ...interface{}) {
if logger.level() >= InfoLevel {
if logger.IsLevelEnabled(InfoLevel) {
entry := logger.newEntry()
entry.Info(args...)
logger.releaseEntry(entry)
@ -206,7 +207,7 @@ func (logger *Logger) Print(args ...interface{}) {
}
func (logger *Logger) Warn(args ...interface{}) {
if logger.level() >= WarnLevel {
if logger.IsLevelEnabled(WarnLevel) {
entry := logger.newEntry()
entry.Warn(args...)
logger.releaseEntry(entry)
@ -214,7 +215,7 @@ func (logger *Logger) Warn(args ...interface{}) {
}
func (logger *Logger) Warning(args ...interface{}) {
if logger.level() >= WarnLevel {
if logger.IsLevelEnabled(WarnLevel) {
entry := logger.newEntry()
entry.Warn(args...)
logger.releaseEntry(entry)
@ -222,7 +223,7 @@ func (logger *Logger) Warning(args ...interface{}) {
}
func (logger *Logger) Error(args ...interface{}) {
if logger.level() >= ErrorLevel {
if logger.IsLevelEnabled(ErrorLevel) {
entry := logger.newEntry()
entry.Error(args...)
logger.releaseEntry(entry)
@ -230,7 +231,7 @@ func (logger *Logger) Error(args ...interface{}) {
}
func (logger *Logger) Fatal(args ...interface{}) {
if logger.level() >= FatalLevel {
if logger.IsLevelEnabled(FatalLevel) {
entry := logger.newEntry()
entry.Fatal(args...)
logger.releaseEntry(entry)
@ -239,7 +240,7 @@ func (logger *Logger) Fatal(args ...interface{}) {
}
func (logger *Logger) Panic(args ...interface{}) {
if logger.level() >= PanicLevel {
if logger.IsLevelEnabled(PanicLevel) {
entry := logger.newEntry()
entry.Panic(args...)
logger.releaseEntry(entry)
@ -247,7 +248,7 @@ func (logger *Logger) Panic(args ...interface{}) {
}
func (logger *Logger) Debugln(args ...interface{}) {
if logger.level() >= DebugLevel {
if logger.IsLevelEnabled(DebugLevel) {
entry := logger.newEntry()
entry.Debugln(args...)
logger.releaseEntry(entry)
@ -255,7 +256,7 @@ func (logger *Logger) Debugln(args ...interface{}) {
}
func (logger *Logger) Infoln(args ...interface{}) {
if logger.level() >= InfoLevel {
if logger.IsLevelEnabled(InfoLevel) {
entry := logger.newEntry()
entry.Infoln(args...)
logger.releaseEntry(entry)
@ -269,7 +270,7 @@ func (logger *Logger) Println(args ...interface{}) {
}
func (logger *Logger) Warnln(args ...interface{}) {
if logger.level() >= WarnLevel {
if logger.IsLevelEnabled(WarnLevel) {
entry := logger.newEntry()
entry.Warnln(args...)
logger.releaseEntry(entry)
@ -277,7 +278,7 @@ func (logger *Logger) Warnln(args ...interface{}) {
}
func (logger *Logger) Warningln(args ...interface{}) {
if logger.level() >= WarnLevel {
if logger.IsLevelEnabled(WarnLevel) {
entry := logger.newEntry()
entry.Warnln(args...)
logger.releaseEntry(entry)
@ -285,7 +286,7 @@ func (logger *Logger) Warningln(args ...interface{}) {
}
func (logger *Logger) Errorln(args ...interface{}) {
if logger.level() >= ErrorLevel {
if logger.IsLevelEnabled(ErrorLevel) {
entry := logger.newEntry()
entry.Errorln(args...)
logger.releaseEntry(entry)
@ -293,7 +294,7 @@ func (logger *Logger) Errorln(args ...interface{}) {
}
func (logger *Logger) Fatalln(args ...interface{}) {
if logger.level() >= FatalLevel {
if logger.IsLevelEnabled(FatalLevel) {
entry := logger.newEntry()
entry.Fatalln(args...)
logger.releaseEntry(entry)
@ -302,7 +303,7 @@ func (logger *Logger) Fatalln(args ...interface{}) {
}
func (logger *Logger) Panicln(args ...interface{}) {
if logger.level() >= PanicLevel {
if logger.IsLevelEnabled(PanicLevel) {
entry := logger.newEntry()
entry.Panicln(args...)
logger.releaseEntry(entry)
@ -320,18 +321,47 @@ func (logger *Logger) level() Level {
return Level(atomic.LoadUint32((*uint32)(&logger.Level)))
}
// SetLevel sets the logger level.
func (logger *Logger) SetLevel(level Level) {
atomic.StoreUint32((*uint32)(&logger.Level), uint32(level))
}
func (logger *Logger) SetOutput(out io.Writer) {
logger.mu.Lock()
defer logger.mu.Unlock()
logger.Out = out
// GetLevel returns the logger level.
func (logger *Logger) GetLevel() Level {
return logger.level()
}
// AddHook adds a hook to the logger hooks.
func (logger *Logger) AddHook(hook Hook) {
logger.mu.Lock()
defer logger.mu.Unlock()
logger.Hooks.Add(hook)
}
// IsLevelEnabled checks if the log level of the logger is greater than the level param
func (logger *Logger) IsLevelEnabled(level Level) bool {
return logger.level() >= level
}
// SetFormatter sets the logger formatter.
func (logger *Logger) SetFormatter(formatter Formatter) {
logger.mu.Lock()
defer logger.mu.Unlock()
logger.Formatter = formatter
}
// SetOutput sets the logger output.
func (logger *Logger) SetOutput(output io.Writer) {
logger.mu.Lock()
defer logger.mu.Unlock()
logger.Out = output
}
// ReplaceHooks replaces the logger hooks and returns the old ones
func (logger *Logger) ReplaceHooks(hooks LevelHooks) LevelHooks {
logger.mu.Lock()
oldHooks := logger.Hooks
logger.Hooks = hooks
logger.mu.Unlock()
return oldHooks
}
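These exported methods let callers configure a Logger without touching its fields directly (and without racing the internal mutex); a brief sketch:

logger := logrus.New()
logger.SetOutput(os.Stdout)
logger.SetFormatter(&logrus.TextFormatter{FullTimestamp: true})
logger.SetLevel(logrus.WarnLevel)

if logger.IsLevelEnabled(logrus.DebugLevel) {
	logger.Debug("expensive debug dump") // skipped: Debug is below the configured Warn level
}
logger.Warn("disk space low") // printed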


@ -140,4 +140,11 @@ type FieldLogger interface {
Errorln(args ...interface{})
Fatalln(args ...interface{})
Panicln(args ...interface{})
// IsDebugEnabled() bool
// IsInfoEnabled() bool
// IsWarnEnabled() bool
// IsErrorEnabled() bool
// IsFatalEnabled() bool
// IsPanicEnabled() bool
}


@ -1,10 +0,0 @@
// +build darwin freebsd openbsd netbsd dragonfly
// +build !appengine,!gopherjs
package logrus
import "golang.org/x/sys/unix"
const ioctlReadTermios = unix.TIOCGETA
type Termios unix.Termios


@ -1,4 +1,4 @@
// +build appengine gopherjs
// +build appengine
package logrus

vendor/github.com/sirupsen/logrus/terminal_check_js.go generated vendored Normal file

@ -0,0 +1,11 @@
// +build js
package logrus
import (
"io"
)
func checkIfTerminal(w io.Writer) bool {
return false
}


@ -1,4 +1,4 @@
// +build !appengine,!gopherjs
// +build !appengine,!js,!windows
package logrus


@ -0,0 +1,20 @@
// +build !appengine,!js,windows
package logrus
import (
"io"
"os"
"syscall"
)
func checkIfTerminal(w io.Writer) bool {
switch v := w.(type) {
case *os.File:
var mode uint32
err := syscall.GetConsoleMode(syscall.Handle(v.Fd()), &mode)
return err == nil
default:
return false
}
}


@ -1,14 +0,0 @@
// Based on ssh/terminal:
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build !appengine,!gopherjs
package logrus
import "golang.org/x/sys/unix"
const ioctlReadTermios = unix.TCGETS
type Termios unix.Termios


@ -0,0 +1,8 @@
// +build !windows
package logrus
import "io"
func initTerminal(w io.Writer) {
}

vendor/github.com/sirupsen/logrus/terminal_windows.go generated vendored Normal file

@ -0,0 +1,18 @@
// +build !appengine,!js,windows
package logrus
import (
"io"
"os"
"syscall"
sequences "github.com/konsorten/go-windows-terminal-sequences"
)
func initTerminal(w io.Writer) {
switch v := w.(type) {
case *os.File:
sequences.EnableVirtualTerminalProcessing(syscall.Handle(v.Fd()), true)
}
}


@ -3,6 +3,7 @@ package logrus
import (
"bytes"
"fmt"
"os"
"sort"
"strings"
"sync"
@ -35,6 +36,9 @@ type TextFormatter struct {
// Force disabling colors.
DisableColors bool
// Override coloring based on CLICOLOR and CLICOLOR_FORCE. - https://bixense.com/clicolors/
EnvironmentOverrideColors bool
// Disable timestamp logging. Useful when output is redirected to a logging
// system that already adds timestamps.
DisableTimestamp bool
@ -51,6 +55,9 @@ type TextFormatter struct {
// be desired.
DisableSorting bool
// The function used to sort keys; when uninitialized, sort.Strings is used.
SortingFunc func([]string)
// Disables the truncation of the level text to 4 characters.
DisableLevelTruncation bool
@ -69,15 +76,35 @@ type TextFormatter struct {
// FieldKeyMsg: "@message"}}
FieldMap FieldMap
sync.Once
terminalInitOnce sync.Once
}
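A sketch of the new formatter knobs: CLICOLOR / CLICOLOR_FORCE handling and a caller-supplied key sort (the reverse sort is only an example; the sort import is assumed):

logrus.SetFormatter(&logrus.TextFormatter{
	EnvironmentOverrideColors: true, // honor CLICOLOR=0 / CLICOLOR_FORCE=1
	DisableLevelTruncation:    true, // keep the full level text in colored output
	SortingFunc: func(keys []string) {
		sort.Sort(sort.Reverse(sort.StringSlice(keys))) // reverse-lexical field order
	},
})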
func (f *TextFormatter) init(entry *Entry) {
if entry.Logger != nil {
f.isTerminal = checkIfTerminal(entry.Logger.Out)
if f.isTerminal {
initTerminal(entry.Logger.Out)
}
}
}
func (f *TextFormatter) isColored() bool {
isColored := f.ForceColors || f.isTerminal
if f.EnvironmentOverrideColors {
if force, ok := os.LookupEnv("CLICOLOR_FORCE"); ok && force != "0" {
isColored = true
} else if ok && force == "0" {
isColored = false
} else if os.Getenv("CLICOLOR") == "0" {
isColored = false
}
}
return isColored && !f.DisableColors
}
// Format renders a single log entry
func (f *TextFormatter) Format(entry *Entry) ([]byte, error) {
prefixFieldClashes(entry.Data, f.FieldMap)
@ -87,8 +114,32 @@ func (f *TextFormatter) Format(entry *Entry) ([]byte, error) {
keys = append(keys, k)
}
fixedKeys := make([]string, 0, 4+len(entry.Data))
if !f.DisableTimestamp {
fixedKeys = append(fixedKeys, f.FieldMap.resolve(FieldKeyTime))
}
fixedKeys = append(fixedKeys, f.FieldMap.resolve(FieldKeyLevel))
if entry.Message != "" {
fixedKeys = append(fixedKeys, f.FieldMap.resolve(FieldKeyMsg))
}
if entry.err != "" {
fixedKeys = append(fixedKeys, f.FieldMap.resolve(FieldKeyLogrusError))
}
if !f.DisableSorting {
sort.Strings(keys)
if f.SortingFunc == nil {
sort.Strings(keys)
fixedKeys = append(fixedKeys, keys...)
} else {
if !f.isColored() {
fixedKeys = append(fixedKeys, keys...)
f.SortingFunc(fixedKeys)
} else {
f.SortingFunc(keys)
}
}
} else {
fixedKeys = append(fixedKeys, keys...)
}
var b *bytes.Buffer
@ -98,26 +149,30 @@ func (f *TextFormatter) Format(entry *Entry) ([]byte, error) {
b = &bytes.Buffer{}
}
f.Do(func() { f.init(entry) })
isColored := (f.ForceColors || f.isTerminal) && !f.DisableColors
f.terminalInitOnce.Do(func() { f.init(entry) })
timestampFormat := f.TimestampFormat
if timestampFormat == "" {
timestampFormat = defaultTimestampFormat
}
if isColored {
if f.isColored() {
f.printColored(b, entry, keys, timestampFormat)
} else {
if !f.DisableTimestamp {
f.appendKeyValue(b, f.FieldMap.resolve(FieldKeyTime), entry.Time.Format(timestampFormat))
}
f.appendKeyValue(b, f.FieldMap.resolve(FieldKeyLevel), entry.Level.String())
if entry.Message != "" {
f.appendKeyValue(b, f.FieldMap.resolve(FieldKeyMsg), entry.Message)
}
for _, key := range keys {
f.appendKeyValue(b, key, entry.Data[key])
for _, key := range fixedKeys {
var value interface{}
switch key {
case f.FieldMap.resolve(FieldKeyTime):
value = entry.Time.Format(timestampFormat)
case f.FieldMap.resolve(FieldKeyLevel):
value = entry.Level.String()
case f.FieldMap.resolve(FieldKeyMsg):
value = entry.Message
case f.FieldMap.resolve(FieldKeyLogrusError):
value = entry.err
default:
value = entry.Data[key]
}
f.appendKeyValue(b, key, value)
}
}
@ -143,6 +198,10 @@ func (f *TextFormatter) printColored(b *bytes.Buffer, entry *Entry, keys []strin
levelText = levelText[0:4]
}
// Remove a single newline if it already exists in the message to keep
// the behavior of logrus text_formatter the same as the stdlib log package
entry.Message = strings.TrimSuffix(entry.Message, "\n")
if f.DisableTimestamp {
fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m %-44s ", levelColor, levelText, entry.Message)
} else if !f.FullTimestamp {


@ -925,13 +925,16 @@ func stripUnknownFlagValue(args []string) []string {
}
first := args[0]
if first[0] == '-' {
if len(first) > 0 && first[0] == '-' {
//--unknown --next-flag ...
return args
}
//--unknown arg ... (args will be arg ...)
return args[1:]
if len(args) > 1 {
return args[1:]
}
return nil
}
func (f *FlagSet) parseLongArg(s string, args []string, fn parseFunc) (a []string, err error) {

vendor/github.com/spf13/pflag/string_to_int.go generated vendored Normal file

@ -0,0 +1,149 @@
package pflag
import (
"bytes"
"fmt"
"strconv"
"strings"
)
// -- stringToInt Value
type stringToIntValue struct {
value *map[string]int
changed bool
}
func newStringToIntValue(val map[string]int, p *map[string]int) *stringToIntValue {
ssv := new(stringToIntValue)
ssv.value = p
*ssv.value = val
return ssv
}
// Format: a=1,b=2
func (s *stringToIntValue) Set(val string) error {
ss := strings.Split(val, ",")
out := make(map[string]int, len(ss))
for _, pair := range ss {
kv := strings.SplitN(pair, "=", 2)
if len(kv) != 2 {
return fmt.Errorf("%s must be formatted as key=value", pair)
}
var err error
out[kv[0]], err = strconv.Atoi(kv[1])
if err != nil {
return err
}
}
if !s.changed {
*s.value = out
} else {
for k, v := range out {
(*s.value)[k] = v
}
}
s.changed = true
return nil
}
func (s *stringToIntValue) Type() string {
return "stringToInt"
}
func (s *stringToIntValue) String() string {
var buf bytes.Buffer
i := 0
for k, v := range *s.value {
if i > 0 {
buf.WriteRune(',')
}
buf.WriteString(k)
buf.WriteRune('=')
buf.WriteString(strconv.Itoa(v))
i++
}
return "[" + buf.String() + "]"
}
func stringToIntConv(val string) (interface{}, error) {
val = strings.Trim(val, "[]")
// An empty string would cause an empty map
if len(val) == 0 {
return map[string]int{}, nil
}
ss := strings.Split(val, ",")
out := make(map[string]int, len(ss))
for _, pair := range ss {
kv := strings.SplitN(pair, "=", 2)
if len(kv) != 2 {
return nil, fmt.Errorf("%s must be formatted as key=value", pair)
}
var err error
out[kv[0]], err = strconv.Atoi(kv[1])
if err != nil {
return nil, err
}
}
return out, nil
}
// GetStringToInt return the map[string]int value of a flag with the given name
func (f *FlagSet) GetStringToInt(name string) (map[string]int, error) {
val, err := f.getFlagType(name, "stringToInt", stringToIntConv)
if err != nil {
return map[string]int{}, err
}
return val.(map[string]int), nil
}
// StringToIntVar defines a string flag with specified name, default value, and usage string.
// The argument p points to a map[string]int variable in which to store the values of the multiple flags.
// The value of each argument will not try to be separated by comma
func (f *FlagSet) StringToIntVar(p *map[string]int, name string, value map[string]int, usage string) {
f.VarP(newStringToIntValue(value, p), name, "", usage)
}
// StringToIntVarP is like StringToIntVar, but accepts a shorthand letter that can be used after a single dash.
func (f *FlagSet) StringToIntVarP(p *map[string]int, name, shorthand string, value map[string]int, usage string) {
f.VarP(newStringToIntValue(value, p), name, shorthand, usage)
}
// StringToIntVar defines a string flag with specified name, default value, and usage string.
// The argument p points to a map[string]int variable in which to store the value of the flag.
// The value of each argument will not try to be separated by comma
func StringToIntVar(p *map[string]int, name string, value map[string]int, usage string) {
CommandLine.VarP(newStringToIntValue(value, p), name, "", usage)
}
// StringToIntVarP is like StringToIntVar, but accepts a shorthand letter that can be used after a single dash.
func StringToIntVarP(p *map[string]int, name, shorthand string, value map[string]int, usage string) {
CommandLine.VarP(newStringToIntValue(value, p), name, shorthand, usage)
}
// StringToInt defines a string flag with specified name, default value, and usage string.
// The return value is the address of a map[string]int variable that stores the value of the flag.
// The value of each argument will not try to be separated by comma
func (f *FlagSet) StringToInt(name string, value map[string]int, usage string) *map[string]int {
p := map[string]int{}
f.StringToIntVarP(&p, name, "", value, usage)
return &p
}
// StringToIntP is like StringToInt, but accepts a shorthand letter that can be used after a single dash.
func (f *FlagSet) StringToIntP(name, shorthand string, value map[string]int, usage string) *map[string]int {
p := map[string]int{}
f.StringToIntVarP(&p, name, shorthand, value, usage)
return &p
}
// StringToInt defines a string flag with specified name, default value, and usage string.
// The return value is the address of a map[string]int variable that stores the value of the flag.
// The value of each argument will not try to be separated by comma
func StringToInt(name string, value map[string]int, usage string) *map[string]int {
return CommandLine.StringToIntP(name, "", value, usage)
}
// StringToIntP is like StringToInt, but accepts a shorthand letter that can be used after a single dash.
func StringToIntP(name, shorthand string, value map[string]int, usage string) *map[string]int {
return CommandLine.StringToIntP(name, shorthand, value, usage)
}
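The new flag type collects repeated key=value pairs into a map; a small sketch with a throwaway FlagSet (the flag name is made up):

fs := pflag.NewFlagSet("example", pflag.ContinueOnError)
limits := fs.StringToInt("limit", map[string]int{}, "per-user limits as name=value pairs")

if err := fs.Parse([]string{"--limit", "uploads=10,downloads=50"}); err != nil {
	panic(err)
}
fmt.Println((*limits)["uploads"]) // 10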

vendor/github.com/spf13/pflag/string_to_string.go generated vendored Normal file

@ -0,0 +1,160 @@
package pflag
import (
"bytes"
"encoding/csv"
"fmt"
"strings"
)
// -- stringToString Value
type stringToStringValue struct {
value *map[string]string
changed bool
}
func newStringToStringValue(val map[string]string, p *map[string]string) *stringToStringValue {
ssv := new(stringToStringValue)
ssv.value = p
*ssv.value = val
return ssv
}
// Format: a=1,b=2
func (s *stringToStringValue) Set(val string) error {
var ss []string
n := strings.Count(val, "=")
switch n {
case 0:
return fmt.Errorf("%s must be formatted as key=value", val)
case 1:
ss = append(ss, strings.Trim(val, `"`))
default:
r := csv.NewReader(strings.NewReader(val))
var err error
ss, err = r.Read()
if err != nil {
return err
}
}
out := make(map[string]string, len(ss))
for _, pair := range ss {
kv := strings.SplitN(pair, "=", 2)
if len(kv) != 2 {
return fmt.Errorf("%s must be formatted as key=value", pair)
}
out[kv[0]] = kv[1]
}
if !s.changed {
*s.value = out
} else {
for k, v := range out {
(*s.value)[k] = v
}
}
s.changed = true
return nil
}
func (s *stringToStringValue) Type() string {
return "stringToString"
}
func (s *stringToStringValue) String() string {
records := make([]string, 0, len(*s.value)>>1)
for k, v := range *s.value {
records = append(records, k+"="+v)
}
var buf bytes.Buffer
w := csv.NewWriter(&buf)
if err := w.Write(records); err != nil {
panic(err)
}
w.Flush()
return "[" + strings.TrimSpace(buf.String()) + "]"
}
func stringToStringConv(val string) (interface{}, error) {
val = strings.Trim(val, "[]")
// An empty string would cause an empty map
if len(val) == 0 {
return map[string]string{}, nil
}
r := csv.NewReader(strings.NewReader(val))
ss, err := r.Read()
if err != nil {
return nil, err
}
out := make(map[string]string, len(ss))
for _, pair := range ss {
kv := strings.SplitN(pair, "=", 2)
if len(kv) != 2 {
return nil, fmt.Errorf("%s must be formatted as key=value", pair)
}
out[kv[0]] = kv[1]
}
return out, nil
}
// GetStringToString return the map[string]string value of a flag with the given name
func (f *FlagSet) GetStringToString(name string) (map[string]string, error) {
val, err := f.getFlagType(name, "stringToString", stringToStringConv)
if err != nil {
return map[string]string{}, err
}
return val.(map[string]string), nil
}
// StringToStringVar defines a string flag with specified name, default value, and usage string.
// The argument p points to a map[string]string variable in which to store the values of the multiple flags.
// The value of each argument will not try to be separated by comma
func (f *FlagSet) StringToStringVar(p *map[string]string, name string, value map[string]string, usage string) {
f.VarP(newStringToStringValue(value, p), name, "", usage)
}
// StringToStringVarP is like StringToStringVar, but accepts a shorthand letter that can be used after a single dash.
func (f *FlagSet) StringToStringVarP(p *map[string]string, name, shorthand string, value map[string]string, usage string) {
f.VarP(newStringToStringValue(value, p), name, shorthand, usage)
}
// StringToStringVar defines a string flag with specified name, default value, and usage string.
// The argument p points to a map[string]string variable in which to store the value of the flag.
// The value of each argument will not try to be separated by comma
func StringToStringVar(p *map[string]string, name string, value map[string]string, usage string) {
CommandLine.VarP(newStringToStringValue(value, p), name, "", usage)
}
// StringToStringVarP is like StringToStringVar, but accepts a shorthand letter that can be used after a single dash.
func StringToStringVarP(p *map[string]string, name, shorthand string, value map[string]string, usage string) {
CommandLine.VarP(newStringToStringValue(value, p), name, shorthand, usage)
}
// StringToString defines a string flag with specified name, default value, and usage string.
// The return value is the address of a map[string]string variable that stores the value of the flag.
// The value of each argument will not try to be separated by comma
func (f *FlagSet) StringToString(name string, value map[string]string, usage string) *map[string]string {
p := map[string]string{}
f.StringToStringVarP(&p, name, "", value, usage)
return &p
}
// StringToStringP is like StringToString, but accepts a shorthand letter that can be used after a single dash.
func (f *FlagSet) StringToStringP(name, shorthand string, value map[string]string, usage string) *map[string]string {
p := map[string]string{}
f.StringToStringVarP(&p, name, shorthand, value, usage)
return &p
}
// StringToString defines a string flag with specified name, default value, and usage string.
// The return value is the address of a map[string]string variable that stores the value of the flag.
// The value of each argument will not try to be separated by comma
func StringToString(name string, value map[string]string, usage string) *map[string]string {
return CommandLine.StringToStringP(name, "", value, usage)
}
// StringToStringP is like StringToString, but accepts a shorthand letter that can be used after a single dash.
func StringToStringP(name, shorthand string, value map[string]string, usage string) *map[string]string {
return CommandLine.StringToStringP(name, shorthand, value, usage)
}
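StringToString behaves the same way but keeps string values (and parses the list with encoding/csv, so quoted values may contain commas); a short sketch:

fs := pflag.NewFlagSet("example", pflag.ContinueOnError)
labels := fs.StringToString("label", nil, "labels as key=value pairs")

_ = fs.Parse([]string{"--label", "env=prod,team=backend"})
fmt.Println((*labels)["team"]) // backend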


@ -6,17 +6,7 @@
// +build ppc
// Functions to access/create device major and minor numbers matching the
// encoding used by the Linux kernel and glibc.
//
// The information below is extracted and adapted from bits/sysmacros.h in the
// glibc sources:
//
// dev_t in glibc is 64-bit, with 32-bit major and minor numbers. glibc's
// default encoding is MMMM Mmmm mmmM MMmm, where M is a hex digit of the major
// number and m is a hex digit of the minor number. This is backward compatible
// with legacy systems where dev_t is 16 bits wide, encoded as MMmm. It is also
// backward compatible with the Linux kernel, which for some architectures uses
// 32-bit dev_t, encoded as mmmM MMmm.
// encoding used by AIX.
package unix


@ -6,17 +6,7 @@
// +build ppc64
// Functions to access/create device major and minor numbers matching the
// encoding used by the Linux kernel and glibc.
//
// The information below is extracted and adapted from bits/sysmacros.h in the
// glibc sources:
//
// dev_t in glibc is 64-bit, with 32-bit major and minor numbers. glibc's
// default encoding is MMMM Mmmm mmmM MMmm, where M is a hex digit of the major
// number and m is a hex digit of the minor number. This is backward compatible
// with legacy systems where dev_t is 16 bits wide, encoded as MMmm. It is also
// backward compatible with the Linux kernel, which for some architectures uses
// 32-bit dev_t, encoded as mmmM MMmm.
// encoding used by AIX.
package unix


@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build darwin dragonfly freebsd linux netbsd openbsd solaris
// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris
package unix


@ -12,7 +12,7 @@ import "unsafe"
// Round the length of a raw sockaddr up to align it properly.
func cmsgAlignOf(salen int) int {
salign := sizeofPtr
salign := SizeofPtr
// NOTE: It seems like 64-bit Darwin, DragonFly BSD and
// Solaris kernels still require 32-bit aligned access to
// network subsystem.


@ -345,11 +345,11 @@ func IoctlSetInt(fd int, req uint, value int) error {
return ioctl(fd, req, uintptr(value))
}
func IoctlSetWinsize(fd int, req uint, value *Winsize) error {
func ioctlSetWinsize(fd int, req uint, value *Winsize) error {
return ioctl(fd, req, uintptr(unsafe.Pointer(value)))
}
func IoctlSetTermios(fd int, req uint, value *Termios) error {
func ioctlSetTermios(fd int, req uint, value *Termios) error {
return ioctl(fd, req, uintptr(unsafe.Pointer(value)))
}
@ -419,8 +419,10 @@ func Flock(fd int, how int) (err error) {
//sysnb Getsid(pid int) (sid int, err error)
//sysnb Kill(pid int, sig syscall.Signal) (err error)
//sys Klogctl(typ int, buf []byte) (n int, err error) = syslog
//sys Mkdir(dirfd int, path string, mode uint32) (err error)
//sys Mkdirat(dirfd int, path string, mode uint32) (err error)
//sys Mkfifo(path string, mode uint32) (err error)
//sys Mknod(path string, mode uint32, dev int) (err error)
//sys Mknodat(dirfd int, path string, mode uint32, dev int) (err error)
//sys Nanosleep(time *Timespec, leftover *Timespec) (err error)
//sys Open(path string, mode int, perm uint32) (fd int, err error) = open64


@ -692,6 +692,24 @@ func (sa *SockaddrVM) sockaddr() (unsafe.Pointer, _Socklen, error) {
return unsafe.Pointer(&sa.raw), SizeofSockaddrVM, nil
}
type SockaddrXDP struct {
Flags uint16
Ifindex uint32
QueueID uint32
SharedUmemFD uint32
raw RawSockaddrXDP
}
func (sa *SockaddrXDP) sockaddr() (unsafe.Pointer, _Socklen, error) {
sa.raw.Family = AF_XDP
sa.raw.Flags = sa.Flags
sa.raw.Ifindex = sa.Ifindex
sa.raw.Queue_id = sa.QueueID
sa.raw.Shared_umem_fd = sa.SharedUmemFD
return unsafe.Pointer(&sa.raw), SizeofSockaddrXDP, nil
}
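SockaddrXDP makes AF_XDP sockets usable through the ordinary Socket/Bind path; a rough sketch (the interface index and queue are placeholders, and a kernel with AF_XDP support is assumed):

fd, err := unix.Socket(unix.AF_XDP, unix.SOCK_RAW, 0)
if err != nil {
	log.Fatal(err)
}
defer unix.Close(fd)

// Bind to queue 0 of the interface with index 2 (placeholders).
if err := unix.Bind(fd, &unix.SockaddrXDP{Ifindex: 2, QueueID: 0}); err != nil {
	log.Fatal(err)
}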
func anyToSockaddr(fd int, rsa *RawSockaddrAny) (Sockaddr, error) {
switch rsa.Addr.Family {
case AF_NETLINK:
@ -793,6 +811,15 @@ func anyToSockaddr(fd int, rsa *RawSockaddrAny) (Sockaddr, error) {
}
return sa, nil
}
case AF_XDP:
pp := (*RawSockaddrXDP)(unsafe.Pointer(rsa))
sa := &SockaddrXDP{
Flags: pp.Flags,
Ifindex: pp.Ifindex,
QueueID: pp.Queue_id,
SharedUmemFD: pp.Shared_umem_fd,
}
return sa, nil
}
return nil, EAFNOSUPPORT
}
@ -1095,7 +1122,7 @@ func ptracePeek(req int, pid int, addr uintptr, out []byte) (count int, err erro
// The ptrace syscall differs from glibc's ptrace.
// Peeks returns the word in *data, not as the return value.
var buf [sizeofPtr]byte
var buf [SizeofPtr]byte
// Leading edge. PEEKTEXT/PEEKDATA don't require aligned
// access (PEEKUSER warns that it might), but if we don't
@ -1103,12 +1130,12 @@ func ptracePeek(req int, pid int, addr uintptr, out []byte) (count int, err erro
// boundary and not get the bytes leading up to the page
// boundary.
n := 0
if addr%sizeofPtr != 0 {
err = ptrace(req, pid, addr-addr%sizeofPtr, uintptr(unsafe.Pointer(&buf[0])))
if addr%SizeofPtr != 0 {
err = ptrace(req, pid, addr-addr%SizeofPtr, uintptr(unsafe.Pointer(&buf[0])))
if err != nil {
return 0, err
}
n += copy(out, buf[addr%sizeofPtr:])
n += copy(out, buf[addr%SizeofPtr:])
out = out[n:]
}
@ -1146,15 +1173,15 @@ func ptracePoke(pokeReq int, peekReq int, pid int, addr uintptr, data []byte) (c
// Leading edge.
n := 0
if addr%sizeofPtr != 0 {
var buf [sizeofPtr]byte
err = ptrace(peekReq, pid, addr-addr%sizeofPtr, uintptr(unsafe.Pointer(&buf[0])))
if addr%SizeofPtr != 0 {
var buf [SizeofPtr]byte
err = ptrace(peekReq, pid, addr-addr%SizeofPtr, uintptr(unsafe.Pointer(&buf[0])))
if err != nil {
return 0, err
}
n += copy(buf[addr%sizeofPtr:], data)
n += copy(buf[addr%SizeofPtr:], data)
word := *((*uintptr)(unsafe.Pointer(&buf[0])))
err = ptrace(pokeReq, pid, addr-addr%sizeofPtr, word)
err = ptrace(pokeReq, pid, addr-addr%SizeofPtr, word)
if err != nil {
return 0, err
}
@ -1162,19 +1189,19 @@ func ptracePoke(pokeReq int, peekReq int, pid int, addr uintptr, data []byte) (c
}
// Interior.
for len(data) > sizeofPtr {
for len(data) > SizeofPtr {
word := *((*uintptr)(unsafe.Pointer(&data[0])))
err = ptrace(pokeReq, pid, addr+uintptr(n), word)
if err != nil {
return n, err
}
n += sizeofPtr
data = data[sizeofPtr:]
n += SizeofPtr
data = data[SizeofPtr:]
}
// Trailing edge.
if len(data) > 0 {
var buf [sizeofPtr]byte
var buf [SizeofPtr]byte
err = ptrace(peekReq, pid, addr+uintptr(n), uintptr(unsafe.Pointer(&buf[0])))
if err != nil {
return n, err
@ -1273,6 +1300,7 @@ func Mount(source string, target string, fstype string, flags uintptr, data stri
//sys Adjtimex(buf *Timex) (state int, err error)
//sys Chdir(path string) (err error)
//sys Chroot(path string) (err error)
//sys ClockGetres(clockid int32, res *Timespec) (err error)
//sys ClockGettime(clockid int32, time *Timespec) (err error)
//sys Close(fd int) (err error)
//sys CopyFileRange(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int, err error)
@ -1496,7 +1524,6 @@ func Faccessat(dirfd int, path string, mode uint32, flags int) (err error) {
// Brk
// Capget
// Capset
// ClockGetres
// ClockNanosleep
// ClockSettime
// Clone

View file

@ -160,3 +160,16 @@ func Poll(fds []PollFd, timeout int) (n int, err error) {
}
return poll(&fds[0], len(fds), timeout)
}
//sys kexecFileLoad(kernelFd int, initrdFd int, cmdlineLen int, cmdline string, flags int) (err error)
func KexecFileLoad(kernelFd int, initrdFd int, cmdline string, flags int) error {
cmdlineLen := len(cmdline)
if cmdlineLen > 0 {
// Account for the additional NULL byte added by
// BytePtrFromString in kexecFileLoad. The kexec_file_load
// syscall expects a NULL-terminated string.
cmdlineLen++
}
return kexecFileLoad(kernelFd, initrdFd, cmdlineLen, cmdline, flags)
}
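A hypothetical caller of the new KexecFileLoad wrapper could look like the sketch below; the kernel path and command line are invented examples, and the program only builds on the Linux architectures that gained this wrapper:

package main

import (
	"log"

	"golang.org/x/sys/unix"
)

func main() {
	kfd, err := unix.Open("/boot/vmlinuz", unix.O_RDONLY, 0) // example path
	if err != nil {
		log.Fatal(err)
	}
	defer unix.Close(kfd)

	// initrdFd of -1 together with KEXEC_FILE_NO_INITRAMFS stages the kernel
	// without an initramfs; the wrapper NUL-terminates the cmdline as noted above.
	if err := unix.KexecFileLoad(kfd, -1, "console=ttyS0", unix.KEXEC_FILE_NO_INITRAMFS); err != nil {
		log.Fatal(err)
	}
}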

View file

@ -136,3 +136,16 @@ func SyncFileRange(fd int, off int64, n int64, flags int) error {
// order of their arguments.
return syncFileRange2(fd, flags, off, n)
}
//sys kexecFileLoad(kernelFd int, initrdFd int, cmdlineLen int, cmdline string, flags int) (err error)
func KexecFileLoad(kernelFd int, initrdFd int, cmdline string, flags int) error {
cmdlineLen := len(cmdline)
if cmdlineLen > 0 {
// Account for the additional NULL byte added by
// BytePtrFromString in kexecFileLoad. The kexec_file_load
// syscall expects a NULL-terminated string.
cmdlineLen++
}
return kexecFileLoad(kernelFd, initrdFd, cmdlineLen, cmdline, flags)
}

View file

@ -322,3 +322,16 @@ func Poll(fds []PollFd, timeout int) (n int, err error) {
}
return poll(&fds[0], len(fds), timeout)
}
//sys kexecFileLoad(kernelFd int, initrdFd int, cmdlineLen int, cmdline string, flags int) (err error)
func KexecFileLoad(kernelFd int, initrdFd int, cmdline string, flags int) error {
cmdlineLen := len(cmdline)
if cmdlineLen > 0 {
// Account for the additional NULL byte added by
// BytePtrFromString in kexecFileLoad. The kexec_file_load
// syscall expects a NULL-terminated string.
cmdlineLen++
}
return kexecFileLoad(kernelFd, initrdFd, cmdlineLen, cmdline, flags)
}

View file

@ -93,6 +93,23 @@ func nametomib(name string) (mib []_C_int, err error) {
return mib, nil
}
func SysctlClockinfo(name string) (*Clockinfo, error) {
mib, err := sysctlmib(name)
if err != nil {
return nil, err
}
n := uintptr(SizeofClockinfo)
var ci Clockinfo
if err := sysctl(mib, (*byte)(unsafe.Pointer(&ci)), &n, nil, 0); err != nil {
return nil, err
}
if n != SizeofClockinfo {
return nil, EIO
}
return &ci, nil
}
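A short usage sketch for the new NetBSD SysctlClockinfo helper; the MIB name "kern.clockrate" is the conventional clockinfo node and is an assumption here, not something this diff establishes:

package main

import (
	"fmt"
	"log"

	"golang.org/x/sys/unix"
)

func main() {
	ci, err := unix.SysctlClockinfo("kern.clockrate") // assumed MIB name
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%+v\n", *ci) // hz, tick and related clock parameters
}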
//sysnb pipe() (fd1 int, fd2 int, err error)
func Pipe(p []int) (err error) {
if len(p) != 2 {

View file

@ -43,6 +43,23 @@ func nametomib(name string) (mib []_C_int, err error) {
return nil, EINVAL
}
func SysctlUvmexp(name string) (*Uvmexp, error) {
mib, err := sysctlmib(name)
if err != nil {
return nil, err
}
n := uintptr(SizeofUvmexp)
var u Uvmexp
if err := sysctl(mib, (*byte)(unsafe.Pointer(&u)), &n, nil, 0); err != nil {
return nil, err
}
if n != SizeofUvmexp {
return nil, EIO
}
return &u, nil
}
//sysnb pipe(p *[2]_C_int) (err error)
func Pipe(p []int) (err error) {
if len(p) != 2 {

View file

@ -22,10 +22,10 @@ var (
)
const (
darwin64Bit = runtime.GOOS == "darwin" && sizeofPtr == 8
dragonfly64Bit = runtime.GOOS == "dragonfly" && sizeofPtr == 8
netbsd32Bit = runtime.GOOS == "netbsd" && sizeofPtr == 4
solaris64Bit = runtime.GOOS == "solaris" && sizeofPtr == 8
darwin64Bit = runtime.GOOS == "darwin" && SizeofPtr == 8
dragonfly64Bit = runtime.GOOS == "dragonfly" && SizeofPtr == 8
netbsd32Bit = runtime.GOOS == "netbsd" && SizeofPtr == 4
solaris64Bit = runtime.GOOS == "solaris" && SizeofPtr == 8
)
// Do the interface allocations only once for common

View file

@ -22,6 +22,11 @@ package unix
#include <utime.h>
#include <sys/utsname.h>
#include <sys/poll.h>
#include <sys/resource.h>
#include <sys/stat.h>
#include <sys/statfs.h>
#include <sys/termio.h>
#include <sys/ioctl.h>
#include <termios.h>
@ -33,7 +38,6 @@ package unix
#include <dirent.h>
#include <fcntl.h>
#include <gcrypt.h>
enum {
sizeofPtr = sizeof(void*),
@ -55,14 +59,14 @@ struct sockaddr_any {
*/
import "C"
// Machine characteristics; for internal use.
// Machine characteristics
const (
sizeofPtr = C.sizeofPtr
sizeofShort = C.sizeof_short
sizeofInt = C.sizeof_int
sizeofLong = C.sizeof_long
sizeofLongLong = C.sizeof_longlong
SizeofPtr = C.sizeofPtr
SizeofShort = C.sizeof_short
SizeofInt = C.sizeof_int
SizeofLong = C.sizeof_long
SizeofLongLong = C.sizeof_longlong
PathMax = C.PATH_MAX
)
@ -224,6 +228,9 @@ type Flock_t C.struct_flock64
// Statfs
type Fsid_t C.struct_fsid_t
type Fsid64_t C.struct_fsid64_t
type Statfs_t C.struct_statfs
const RNDGETENTCNT = 0x80045200

View file

@ -70,14 +70,14 @@ struct sockaddr_any {
*/
import "C"
// Machine characteristics; for internal use.
// Machine characteristics
const (
sizeofPtr = C.sizeofPtr
sizeofShort = C.sizeof_short
sizeofInt = C.sizeof_int
sizeofLong = C.sizeof_long
sizeofLongLong = C.sizeof_longlong
SizeofPtr = C.sizeofPtr
SizeofShort = C.sizeof_short
SizeofInt = C.sizeof_int
SizeofLong = C.sizeof_long
SizeofLongLong = C.sizeof_longlong
)
// Basic types

View file

@ -65,14 +65,14 @@ struct sockaddr_any {
*/
import "C"
// Machine characteristics; for internal use.
// Machine characteristics
const (
sizeofPtr = C.sizeofPtr
sizeofShort = C.sizeof_short
sizeofInt = C.sizeof_int
sizeofLong = C.sizeof_long
sizeofLongLong = C.sizeof_longlong
SizeofPtr = C.sizeofPtr
SizeofShort = C.sizeof_short
SizeofInt = C.sizeof_int
SizeofLong = C.sizeof_long
SizeofLongLong = C.sizeof_longlong
)
// Basic types

View file

@ -154,14 +154,14 @@ struct if_msghdr8 {
*/
import "C"
// Machine characteristics; for internal use.
// Machine characteristics
const (
sizeofPtr = C.sizeofPtr
sizeofShort = C.sizeof_short
sizeofInt = C.sizeof_int
sizeofLong = C.sizeof_long
sizeofLongLong = C.sizeof_longlong
SizeofPtr = C.sizeofPtr
SizeofShort = C.sizeof_short
SizeofInt = C.sizeof_int
SizeofLong = C.sizeof_long
SizeofLongLong = C.sizeof_longlong
)
// Basic types

View file

@ -67,14 +67,14 @@ struct sockaddr_any {
*/
import "C"
// Machine characteristics; for internal use.
// Machine characteristics
const (
sizeofPtr = C.sizeofPtr
sizeofShort = C.sizeof_short
sizeofInt = C.sizeof_int
sizeofLong = C.sizeof_long
sizeofLongLong = C.sizeof_longlong
SizeofPtr = C.sizeofPtr
SizeofShort = C.sizeof_short
SizeofInt = C.sizeof_int
SizeofLong = C.sizeof_long
SizeofLongLong = C.sizeof_longlong
)
// Basic types
@ -279,3 +279,9 @@ type Sysctlnode C.struct_sysctlnode
// Uname
type Utsname C.struct_utsname
// Clockinfo
const SizeofClockinfo = C.sizeof_struct_clockinfo
type Clockinfo C.struct_clockinfo

View file

@ -38,6 +38,7 @@ package unix
#include <sys/un.h>
#include <sys/utsname.h>
#include <sys/wait.h>
#include <uvm/uvmexp.h>
#include <net/bpf.h>
#include <net/if.h>
#include <net/if_dl.h>
@ -66,14 +67,14 @@ struct sockaddr_any {
*/
import "C"
// Machine characteristics; for internal use.
// Machine characteristics
const (
sizeofPtr = C.sizeofPtr
sizeofShort = C.sizeof_short
sizeofInt = C.sizeof_int
sizeofLong = C.sizeof_long
sizeofLongLong = C.sizeof_longlong
SizeofPtr = C.sizeofPtr
SizeofShort = C.sizeof_short
SizeofInt = C.sizeof_int
SizeofLong = C.sizeof_long
SizeofLongLong = C.sizeof_longlong
)
// Basic types
@ -263,3 +264,9 @@ const (
// Uname
type Utsname C.struct_utsname
// Uvmexp
const SizeofUvmexp = C.sizeof_struct_uvmexp
type Uvmexp C.struct_uvmexp

View file

@ -75,14 +75,14 @@ struct sockaddr_any {
*/
import "C"
// Machine characteristics; for internal use.
// Machine characteristics
const (
sizeofPtr = C.sizeofPtr
sizeofShort = C.sizeof_short
sizeofInt = C.sizeof_int
sizeofLong = C.sizeof_long
sizeofLongLong = C.sizeof_longlong
SizeofPtr = C.sizeofPtr
SizeofShort = C.sizeof_short
SizeofInt = C.sizeof_int
SizeofLong = C.sizeof_long
SizeofLongLong = C.sizeof_longlong
PathMax = C.PATH_MAX
MaxHostNameLen = C.MAXHOSTNAMELEN
)

View file

@ -867,6 +867,9 @@ const (
TAB2 = 0x800
TAB3 = 0xc00
TABDLY = 0xc00
TCFLSH = 0x540c
TCGETA = 0x5405
TCGETS = 0x5401
TCIFLUSH = 0x0
TCIOFF = 0x2
TCIOFLUSH = 0x2
@ -915,6 +918,15 @@ const (
TCP_TIMESTAMP_OPTLEN = 0xc
TCP_UNSETPRIV = 0x28
TCSAFLUSH = 0x2
TCSBRK = 0x5409
TCSETA = 0x5406
TCSETAF = 0x5408
TCSETAW = 0x5407
TCSETS = 0x5402
TCSETSF = 0x5404
TCSETSW = 0x5403
TCXONC = 0x540b
TIOC = 0x5400
TIOCCBRK = 0x2000747a
TIOCCDTR = 0x20007478
TIOCCONS = 0x80047462

View file

@ -867,6 +867,9 @@ const (
TAB2 = 0x800
TAB3 = 0xc00
TABDLY = 0xc00
TCFLSH = 0x540c
TCGETA = 0x5405
TCGETS = 0x5401
TCIFLUSH = 0x0
TCIOFF = 0x2
TCIOFLUSH = 0x2
@ -915,6 +918,15 @@ const (
TCP_TIMESTAMP_OPTLEN = 0xc
TCP_UNSETPRIV = 0x28
TCSAFLUSH = 0x2
TCSBRK = 0x5409
TCSETA = 0x5406
TCSETAF = 0x5408
TCSETAW = 0x5407
TCSETS = 0x5402
TCSETSF = 0x5404
TCSETSW = 0x5403
TCXONC = 0x540b
TIOC = 0x5400
TIOCCBRK = 0x2000747a
TIOCCDTR = 0x20007478
TIOCCONS = 0xffffffff80047462

View file

@ -880,6 +880,40 @@ const (
MAP_VPAGETABLE = 0x2000
MCL_CURRENT = 0x1
MCL_FUTURE = 0x2
MNT_ASYNC = 0x40
MNT_AUTOMOUNTED = 0x20
MNT_CMDFLAGS = 0xf0000
MNT_DEFEXPORTED = 0x200
MNT_DELEXPORT = 0x20000
MNT_EXKERB = 0x800
MNT_EXPORTANON = 0x400
MNT_EXPORTED = 0x100
MNT_EXPUBLIC = 0x20000000
MNT_EXRDONLY = 0x80
MNT_FORCE = 0x80000
MNT_IGNORE = 0x800000
MNT_LAZY = 0x4
MNT_LOCAL = 0x1000
MNT_NOATIME = 0x10000000
MNT_NOCLUSTERR = 0x40000000
MNT_NOCLUSTERW = 0x80000000
MNT_NODEV = 0x10
MNT_NOEXEC = 0x4
MNT_NOSUID = 0x8
MNT_NOSYMFOLLOW = 0x400000
MNT_NOWAIT = 0x2
MNT_QUOTA = 0x2000
MNT_RDONLY = 0x1
MNT_RELOAD = 0x40000
MNT_ROOTFS = 0x4000
MNT_SOFTDEP = 0x200000
MNT_SUIDDIR = 0x100000
MNT_SYNCHRONOUS = 0x2
MNT_TRIM = 0x1000000
MNT_UPDATE = 0x10000
MNT_USER = 0x8000
MNT_VISFLAGMASK = 0xf1f0ffff
MNT_WAIT = 0x1
MSG_CMSG_CLOEXEC = 0x1000
MSG_CTRUNC = 0x20
MSG_DONTROUTE = 0x4

View file

@ -64,6 +64,7 @@ const (
AF_VSOCK = 0x28
AF_WANPIPE = 0x19
AF_X25 = 0x9
AF_XDP = 0x2c
ALG_OP_DECRYPT = 0x0
ALG_OP_ENCRYPT = 0x1
ALG_SET_AEAD_ASSOCLEN = 0x4
@ -878,6 +879,26 @@ const (
IXOFF = 0x1000
IXON = 0x400
JFFS2_SUPER_MAGIC = 0x72b6
KEXEC_ARCH_386 = 0x30000
KEXEC_ARCH_68K = 0x40000
KEXEC_ARCH_AARCH64 = 0xb70000
KEXEC_ARCH_ARM = 0x280000
KEXEC_ARCH_DEFAULT = 0x0
KEXEC_ARCH_IA_64 = 0x320000
KEXEC_ARCH_MASK = 0xffff0000
KEXEC_ARCH_MIPS = 0x80000
KEXEC_ARCH_MIPS_LE = 0xa0000
KEXEC_ARCH_PPC = 0x140000
KEXEC_ARCH_PPC64 = 0x150000
KEXEC_ARCH_S390 = 0x160000
KEXEC_ARCH_SH = 0x2a0000
KEXEC_ARCH_X86_64 = 0x3e0000
KEXEC_FILE_NO_INITRAMFS = 0x4
KEXEC_FILE_ON_CRASH = 0x2
KEXEC_FILE_UNLOAD = 0x1
KEXEC_ON_CRASH = 0x1
KEXEC_PRESERVE_CONTEXT = 0x2
KEXEC_SEGMENT_MAX = 0x10
KEYCTL_ASSUME_AUTHORITY = 0x10
KEYCTL_CHOWN = 0x4
KEYCTL_CLEAR = 0x7
@ -1811,6 +1832,7 @@ const (
SOL_TIPC = 0x10f
SOL_TLS = 0x11a
SOL_X25 = 0x106
SOL_XDP = 0x11b
SOMAXCONN = 0x80
SO_ACCEPTCONN = 0x1e
SO_ATTACH_BPF = 0x32
@ -2293,6 +2315,26 @@ const (
XATTR_CREATE = 0x1
XATTR_REPLACE = 0x2
XCASE = 0x4
XDP_COPY = 0x2
XDP_FLAGS_DRV_MODE = 0x4
XDP_FLAGS_HW_MODE = 0x8
XDP_FLAGS_MASK = 0xf
XDP_FLAGS_MODES = 0xe
XDP_FLAGS_SKB_MODE = 0x2
XDP_FLAGS_UPDATE_IF_NOEXIST = 0x1
XDP_MMAP_OFFSETS = 0x1
XDP_PGOFF_RX_RING = 0x0
XDP_PGOFF_TX_RING = 0x80000000
XDP_RX_RING = 0x2
XDP_SHARED_UMEM = 0x1
XDP_STATISTICS = 0x7
XDP_TX_RING = 0x3
XDP_UMEM_COMPLETION_RING = 0x6
XDP_UMEM_FILL_RING = 0x5
XDP_UMEM_PGOFF_COMPLETION_RING = 0x180000000
XDP_UMEM_PGOFF_FILL_RING = 0x100000000
XDP_UMEM_REG = 0x4
XDP_ZEROCOPY = 0x4
XENFS_SUPER_MAGIC = 0xabba1974
XTABS = 0x1800
ZSMALLOC_MAGIC = 0x58295829

View file

@ -64,6 +64,7 @@ const (
AF_VSOCK = 0x28
AF_WANPIPE = 0x19
AF_X25 = 0x9
AF_XDP = 0x2c
ALG_OP_DECRYPT = 0x0
ALG_OP_ENCRYPT = 0x1
ALG_SET_AEAD_ASSOCLEN = 0x4
@ -878,6 +879,26 @@ const (
IXOFF = 0x1000
IXON = 0x400
JFFS2_SUPER_MAGIC = 0x72b6
KEXEC_ARCH_386 = 0x30000
KEXEC_ARCH_68K = 0x40000
KEXEC_ARCH_AARCH64 = 0xb70000
KEXEC_ARCH_ARM = 0x280000
KEXEC_ARCH_DEFAULT = 0x0
KEXEC_ARCH_IA_64 = 0x320000
KEXEC_ARCH_MASK = 0xffff0000
KEXEC_ARCH_MIPS = 0x80000
KEXEC_ARCH_MIPS_LE = 0xa0000
KEXEC_ARCH_PPC = 0x140000
KEXEC_ARCH_PPC64 = 0x150000
KEXEC_ARCH_S390 = 0x160000
KEXEC_ARCH_SH = 0x2a0000
KEXEC_ARCH_X86_64 = 0x3e0000
KEXEC_FILE_NO_INITRAMFS = 0x4
KEXEC_FILE_ON_CRASH = 0x2
KEXEC_FILE_UNLOAD = 0x1
KEXEC_ON_CRASH = 0x1
KEXEC_PRESERVE_CONTEXT = 0x2
KEXEC_SEGMENT_MAX = 0x10
KEYCTL_ASSUME_AUTHORITY = 0x10
KEYCTL_CHOWN = 0x4
KEYCTL_CLEAR = 0x7
@ -1812,6 +1833,7 @@ const (
SOL_TIPC = 0x10f
SOL_TLS = 0x11a
SOL_X25 = 0x106
SOL_XDP = 0x11b
SOMAXCONN = 0x80
SO_ACCEPTCONN = 0x1e
SO_ATTACH_BPF = 0x32
@ -2293,6 +2315,26 @@ const (
XATTR_CREATE = 0x1
XATTR_REPLACE = 0x2
XCASE = 0x4
XDP_COPY = 0x2
XDP_FLAGS_DRV_MODE = 0x4
XDP_FLAGS_HW_MODE = 0x8
XDP_FLAGS_MASK = 0xf
XDP_FLAGS_MODES = 0xe
XDP_FLAGS_SKB_MODE = 0x2
XDP_FLAGS_UPDATE_IF_NOEXIST = 0x1
XDP_MMAP_OFFSETS = 0x1
XDP_PGOFF_RX_RING = 0x0
XDP_PGOFF_TX_RING = 0x80000000
XDP_RX_RING = 0x2
XDP_SHARED_UMEM = 0x1
XDP_STATISTICS = 0x7
XDP_TX_RING = 0x3
XDP_UMEM_COMPLETION_RING = 0x6
XDP_UMEM_FILL_RING = 0x5
XDP_UMEM_PGOFF_COMPLETION_RING = 0x180000000
XDP_UMEM_PGOFF_FILL_RING = 0x100000000
XDP_UMEM_REG = 0x4
XDP_ZEROCOPY = 0x4
XENFS_SUPER_MAGIC = 0xabba1974
XTABS = 0x1800
ZSMALLOC_MAGIC = 0x58295829

View file

@ -64,6 +64,7 @@ const (
AF_VSOCK = 0x28
AF_WANPIPE = 0x19
AF_X25 = 0x9
AF_XDP = 0x2c
ALG_OP_DECRYPT = 0x0
ALG_OP_ENCRYPT = 0x1
ALG_SET_AEAD_ASSOCLEN = 0x4
@ -877,6 +878,26 @@ const (
IXOFF = 0x1000
IXON = 0x400
JFFS2_SUPER_MAGIC = 0x72b6
KEXEC_ARCH_386 = 0x30000
KEXEC_ARCH_68K = 0x40000
KEXEC_ARCH_AARCH64 = 0xb70000
KEXEC_ARCH_ARM = 0x280000
KEXEC_ARCH_DEFAULT = 0x0
KEXEC_ARCH_IA_64 = 0x320000
KEXEC_ARCH_MASK = 0xffff0000
KEXEC_ARCH_MIPS = 0x80000
KEXEC_ARCH_MIPS_LE = 0xa0000
KEXEC_ARCH_PPC = 0x140000
KEXEC_ARCH_PPC64 = 0x150000
KEXEC_ARCH_S390 = 0x160000
KEXEC_ARCH_SH = 0x2a0000
KEXEC_ARCH_X86_64 = 0x3e0000
KEXEC_FILE_NO_INITRAMFS = 0x4
KEXEC_FILE_ON_CRASH = 0x2
KEXEC_FILE_UNLOAD = 0x1
KEXEC_ON_CRASH = 0x1
KEXEC_PRESERVE_CONTEXT = 0x2
KEXEC_SEGMENT_MAX = 0x10
KEYCTL_ASSUME_AUTHORITY = 0x10
KEYCTL_CHOWN = 0x4
KEYCTL_CLEAR = 0x7
@ -1818,6 +1839,7 @@ const (
SOL_TIPC = 0x10f
SOL_TLS = 0x11a
SOL_X25 = 0x106
SOL_XDP = 0x11b
SOMAXCONN = 0x80
SO_ACCEPTCONN = 0x1e
SO_ATTACH_BPF = 0x32
@ -2299,6 +2321,26 @@ const (
XATTR_CREATE = 0x1
XATTR_REPLACE = 0x2
XCASE = 0x4
XDP_COPY = 0x2
XDP_FLAGS_DRV_MODE = 0x4
XDP_FLAGS_HW_MODE = 0x8
XDP_FLAGS_MASK = 0xf
XDP_FLAGS_MODES = 0xe
XDP_FLAGS_SKB_MODE = 0x2
XDP_FLAGS_UPDATE_IF_NOEXIST = 0x1
XDP_MMAP_OFFSETS = 0x1
XDP_PGOFF_RX_RING = 0x0
XDP_PGOFF_TX_RING = 0x80000000
XDP_RX_RING = 0x2
XDP_SHARED_UMEM = 0x1
XDP_STATISTICS = 0x7
XDP_TX_RING = 0x3
XDP_UMEM_COMPLETION_RING = 0x6
XDP_UMEM_FILL_RING = 0x5
XDP_UMEM_PGOFF_COMPLETION_RING = 0x180000000
XDP_UMEM_PGOFF_FILL_RING = 0x100000000
XDP_UMEM_REG = 0x4
XDP_ZEROCOPY = 0x4
XENFS_SUPER_MAGIC = 0xabba1974
XTABS = 0x1800
ZSMALLOC_MAGIC = 0x58295829

View file

@ -64,6 +64,7 @@ const (
AF_VSOCK = 0x28
AF_WANPIPE = 0x19
AF_X25 = 0x9
AF_XDP = 0x2c
ALG_OP_DECRYPT = 0x0
ALG_OP_ENCRYPT = 0x1
ALG_SET_AEAD_ASSOCLEN = 0x4
@ -880,6 +881,26 @@ const (
IXOFF = 0x1000
IXON = 0x400
JFFS2_SUPER_MAGIC = 0x72b6
KEXEC_ARCH_386 = 0x30000
KEXEC_ARCH_68K = 0x40000
KEXEC_ARCH_AARCH64 = 0xb70000
KEXEC_ARCH_ARM = 0x280000
KEXEC_ARCH_DEFAULT = 0x0
KEXEC_ARCH_IA_64 = 0x320000
KEXEC_ARCH_MASK = 0xffff0000
KEXEC_ARCH_MIPS = 0x80000
KEXEC_ARCH_MIPS_LE = 0xa0000
KEXEC_ARCH_PPC = 0x140000
KEXEC_ARCH_PPC64 = 0x150000
KEXEC_ARCH_S390 = 0x160000
KEXEC_ARCH_SH = 0x2a0000
KEXEC_ARCH_X86_64 = 0x3e0000
KEXEC_FILE_NO_INITRAMFS = 0x4
KEXEC_FILE_ON_CRASH = 0x2
KEXEC_FILE_UNLOAD = 0x1
KEXEC_ON_CRASH = 0x1
KEXEC_PRESERVE_CONTEXT = 0x2
KEXEC_SEGMENT_MAX = 0x10
KEYCTL_ASSUME_AUTHORITY = 0x10
KEYCTL_CHOWN = 0x4
KEYCTL_CLEAR = 0x7
@ -1802,6 +1823,7 @@ const (
SOL_TIPC = 0x10f
SOL_TLS = 0x11a
SOL_X25 = 0x106
SOL_XDP = 0x11b
SOMAXCONN = 0x80
SO_ACCEPTCONN = 0x1e
SO_ATTACH_BPF = 0x32
@ -2284,6 +2306,26 @@ const (
XATTR_CREATE = 0x1
XATTR_REPLACE = 0x2
XCASE = 0x4
XDP_COPY = 0x2
XDP_FLAGS_DRV_MODE = 0x4
XDP_FLAGS_HW_MODE = 0x8
XDP_FLAGS_MASK = 0xf
XDP_FLAGS_MODES = 0xe
XDP_FLAGS_SKB_MODE = 0x2
XDP_FLAGS_UPDATE_IF_NOEXIST = 0x1
XDP_MMAP_OFFSETS = 0x1
XDP_PGOFF_RX_RING = 0x0
XDP_PGOFF_TX_RING = 0x80000000
XDP_RX_RING = 0x2
XDP_SHARED_UMEM = 0x1
XDP_STATISTICS = 0x7
XDP_TX_RING = 0x3
XDP_UMEM_COMPLETION_RING = 0x6
XDP_UMEM_FILL_RING = 0x5
XDP_UMEM_PGOFF_COMPLETION_RING = 0x180000000
XDP_UMEM_PGOFF_FILL_RING = 0x100000000
XDP_UMEM_REG = 0x4
XDP_ZEROCOPY = 0x4
XENFS_SUPER_MAGIC = 0xabba1974
XTABS = 0x1800
ZSMALLOC_MAGIC = 0x58295829

View file

@ -64,6 +64,7 @@ const (
AF_VSOCK = 0x28
AF_WANPIPE = 0x19
AF_X25 = 0x9
AF_XDP = 0x2c
ALG_OP_DECRYPT = 0x0
ALG_OP_ENCRYPT = 0x1
ALG_SET_AEAD_ASSOCLEN = 0x4
@ -877,6 +878,26 @@ const (
IXOFF = 0x1000
IXON = 0x400
JFFS2_SUPER_MAGIC = 0x72b6
KEXEC_ARCH_386 = 0x30000
KEXEC_ARCH_68K = 0x40000
KEXEC_ARCH_AARCH64 = 0xb70000
KEXEC_ARCH_ARM = 0x280000
KEXEC_ARCH_DEFAULT = 0x0
KEXEC_ARCH_IA_64 = 0x320000
KEXEC_ARCH_MASK = 0xffff0000
KEXEC_ARCH_MIPS = 0x80000
KEXEC_ARCH_MIPS_LE = 0xa0000
KEXEC_ARCH_PPC = 0x140000
KEXEC_ARCH_PPC64 = 0x150000
KEXEC_ARCH_S390 = 0x160000
KEXEC_ARCH_SH = 0x2a0000
KEXEC_ARCH_X86_64 = 0x3e0000
KEXEC_FILE_NO_INITRAMFS = 0x4
KEXEC_FILE_ON_CRASH = 0x2
KEXEC_FILE_UNLOAD = 0x1
KEXEC_ON_CRASH = 0x1
KEXEC_PRESERVE_CONTEXT = 0x2
KEXEC_SEGMENT_MAX = 0x10
KEYCTL_ASSUME_AUTHORITY = 0x10
KEYCTL_CHOWN = 0x4
KEYCTL_CLEAR = 0x7
@ -1811,6 +1832,7 @@ const (
SOL_TIPC = 0x10f
SOL_TLS = 0x11a
SOL_X25 = 0x106
SOL_XDP = 0x11b
SOMAXCONN = 0x80
SO_ACCEPTCONN = 0x1009
SO_ATTACH_BPF = 0x32
@ -2295,6 +2317,26 @@ const (
XATTR_CREATE = 0x1
XATTR_REPLACE = 0x2
XCASE = 0x4
XDP_COPY = 0x2
XDP_FLAGS_DRV_MODE = 0x4
XDP_FLAGS_HW_MODE = 0x8
XDP_FLAGS_MASK = 0xf
XDP_FLAGS_MODES = 0xe
XDP_FLAGS_SKB_MODE = 0x2
XDP_FLAGS_UPDATE_IF_NOEXIST = 0x1
XDP_MMAP_OFFSETS = 0x1
XDP_PGOFF_RX_RING = 0x0
XDP_PGOFF_TX_RING = 0x80000000
XDP_RX_RING = 0x2
XDP_SHARED_UMEM = 0x1
XDP_STATISTICS = 0x7
XDP_TX_RING = 0x3
XDP_UMEM_COMPLETION_RING = 0x6
XDP_UMEM_FILL_RING = 0x5
XDP_UMEM_PGOFF_COMPLETION_RING = 0x180000000
XDP_UMEM_PGOFF_FILL_RING = 0x100000000
XDP_UMEM_REG = 0x4
XDP_ZEROCOPY = 0x4
XENFS_SUPER_MAGIC = 0xabba1974
XTABS = 0x1800
ZSMALLOC_MAGIC = 0x58295829

View file

@ -64,6 +64,7 @@ const (
AF_VSOCK = 0x28
AF_WANPIPE = 0x19
AF_X25 = 0x9
AF_XDP = 0x2c
ALG_OP_DECRYPT = 0x0
ALG_OP_ENCRYPT = 0x1
ALG_SET_AEAD_ASSOCLEN = 0x4
@ -877,6 +878,26 @@ const (
IXOFF = 0x1000
IXON = 0x400
JFFS2_SUPER_MAGIC = 0x72b6
KEXEC_ARCH_386 = 0x30000
KEXEC_ARCH_68K = 0x40000
KEXEC_ARCH_AARCH64 = 0xb70000
KEXEC_ARCH_ARM = 0x280000
KEXEC_ARCH_DEFAULT = 0x0
KEXEC_ARCH_IA_64 = 0x320000
KEXEC_ARCH_MASK = 0xffff0000
KEXEC_ARCH_MIPS = 0x80000
KEXEC_ARCH_MIPS_LE = 0xa0000
KEXEC_ARCH_PPC = 0x140000
KEXEC_ARCH_PPC64 = 0x150000
KEXEC_ARCH_S390 = 0x160000
KEXEC_ARCH_SH = 0x2a0000
KEXEC_ARCH_X86_64 = 0x3e0000
KEXEC_FILE_NO_INITRAMFS = 0x4
KEXEC_FILE_ON_CRASH = 0x2
KEXEC_FILE_UNLOAD = 0x1
KEXEC_ON_CRASH = 0x1
KEXEC_PRESERVE_CONTEXT = 0x2
KEXEC_SEGMENT_MAX = 0x10
KEYCTL_ASSUME_AUTHORITY = 0x10
KEYCTL_CHOWN = 0x4
KEYCTL_CLEAR = 0x7
@ -1811,6 +1832,7 @@ const (
SOL_TIPC = 0x10f
SOL_TLS = 0x11a
SOL_X25 = 0x106
SOL_XDP = 0x11b
SOMAXCONN = 0x80
SO_ACCEPTCONN = 0x1009
SO_ATTACH_BPF = 0x32
@ -2295,6 +2317,26 @@ const (
XATTR_CREATE = 0x1
XATTR_REPLACE = 0x2
XCASE = 0x4
XDP_COPY = 0x2
XDP_FLAGS_DRV_MODE = 0x4
XDP_FLAGS_HW_MODE = 0x8
XDP_FLAGS_MASK = 0xf
XDP_FLAGS_MODES = 0xe
XDP_FLAGS_SKB_MODE = 0x2
XDP_FLAGS_UPDATE_IF_NOEXIST = 0x1
XDP_MMAP_OFFSETS = 0x1
XDP_PGOFF_RX_RING = 0x0
XDP_PGOFF_TX_RING = 0x80000000
XDP_RX_RING = 0x2
XDP_SHARED_UMEM = 0x1
XDP_STATISTICS = 0x7
XDP_TX_RING = 0x3
XDP_UMEM_COMPLETION_RING = 0x6
XDP_UMEM_FILL_RING = 0x5
XDP_UMEM_PGOFF_COMPLETION_RING = 0x180000000
XDP_UMEM_PGOFF_FILL_RING = 0x100000000
XDP_UMEM_REG = 0x4
XDP_ZEROCOPY = 0x4
XENFS_SUPER_MAGIC = 0xabba1974
XTABS = 0x1800
ZSMALLOC_MAGIC = 0x58295829

View file

@ -64,6 +64,7 @@ const (
AF_VSOCK = 0x28
AF_WANPIPE = 0x19
AF_X25 = 0x9
AF_XDP = 0x2c
ALG_OP_DECRYPT = 0x0
ALG_OP_ENCRYPT = 0x1
ALG_SET_AEAD_ASSOCLEN = 0x4
@ -877,6 +878,26 @@ const (
IXOFF = 0x1000
IXON = 0x400
JFFS2_SUPER_MAGIC = 0x72b6
KEXEC_ARCH_386 = 0x30000
KEXEC_ARCH_68K = 0x40000
KEXEC_ARCH_AARCH64 = 0xb70000
KEXEC_ARCH_ARM = 0x280000
KEXEC_ARCH_DEFAULT = 0x0
KEXEC_ARCH_IA_64 = 0x320000
KEXEC_ARCH_MASK = 0xffff0000
KEXEC_ARCH_MIPS = 0x80000
KEXEC_ARCH_MIPS_LE = 0xa0000
KEXEC_ARCH_PPC = 0x140000
KEXEC_ARCH_PPC64 = 0x150000
KEXEC_ARCH_S390 = 0x160000
KEXEC_ARCH_SH = 0x2a0000
KEXEC_ARCH_X86_64 = 0x3e0000
KEXEC_FILE_NO_INITRAMFS = 0x4
KEXEC_FILE_ON_CRASH = 0x2
KEXEC_FILE_UNLOAD = 0x1
KEXEC_ON_CRASH = 0x1
KEXEC_PRESERVE_CONTEXT = 0x2
KEXEC_SEGMENT_MAX = 0x10
KEYCTL_ASSUME_AUTHORITY = 0x10
KEYCTL_CHOWN = 0x4
KEYCTL_CLEAR = 0x7
@ -1811,6 +1832,7 @@ const (
SOL_TIPC = 0x10f
SOL_TLS = 0x11a
SOL_X25 = 0x106
SOL_XDP = 0x11b
SOMAXCONN = 0x80
SO_ACCEPTCONN = 0x1009
SO_ATTACH_BPF = 0x32
@ -2295,6 +2317,26 @@ const (
XATTR_CREATE = 0x1
XATTR_REPLACE = 0x2
XCASE = 0x4
XDP_COPY = 0x2
XDP_FLAGS_DRV_MODE = 0x4
XDP_FLAGS_HW_MODE = 0x8
XDP_FLAGS_MASK = 0xf
XDP_FLAGS_MODES = 0xe
XDP_FLAGS_SKB_MODE = 0x2
XDP_FLAGS_UPDATE_IF_NOEXIST = 0x1
XDP_MMAP_OFFSETS = 0x1
XDP_PGOFF_RX_RING = 0x0
XDP_PGOFF_TX_RING = 0x80000000
XDP_RX_RING = 0x2
XDP_SHARED_UMEM = 0x1
XDP_STATISTICS = 0x7
XDP_TX_RING = 0x3
XDP_UMEM_COMPLETION_RING = 0x6
XDP_UMEM_FILL_RING = 0x5
XDP_UMEM_PGOFF_COMPLETION_RING = 0x180000000
XDP_UMEM_PGOFF_FILL_RING = 0x100000000
XDP_UMEM_REG = 0x4
XDP_ZEROCOPY = 0x4
XENFS_SUPER_MAGIC = 0xabba1974
XTABS = 0x1800
ZSMALLOC_MAGIC = 0x58295829

View file

@ -64,6 +64,7 @@ const (
AF_VSOCK = 0x28
AF_WANPIPE = 0x19
AF_X25 = 0x9
AF_XDP = 0x2c
ALG_OP_DECRYPT = 0x0
ALG_OP_ENCRYPT = 0x1
ALG_SET_AEAD_ASSOCLEN = 0x4
@ -877,6 +878,26 @@ const (
IXOFF = 0x1000
IXON = 0x400
JFFS2_SUPER_MAGIC = 0x72b6
KEXEC_ARCH_386 = 0x30000
KEXEC_ARCH_68K = 0x40000
KEXEC_ARCH_AARCH64 = 0xb70000
KEXEC_ARCH_ARM = 0x280000
KEXEC_ARCH_DEFAULT = 0x0
KEXEC_ARCH_IA_64 = 0x320000
KEXEC_ARCH_MASK = 0xffff0000
KEXEC_ARCH_MIPS = 0x80000
KEXEC_ARCH_MIPS_LE = 0xa0000
KEXEC_ARCH_PPC = 0x140000
KEXEC_ARCH_PPC64 = 0x150000
KEXEC_ARCH_S390 = 0x160000
KEXEC_ARCH_SH = 0x2a0000
KEXEC_ARCH_X86_64 = 0x3e0000
KEXEC_FILE_NO_INITRAMFS = 0x4
KEXEC_FILE_ON_CRASH = 0x2
KEXEC_FILE_UNLOAD = 0x1
KEXEC_ON_CRASH = 0x1
KEXEC_PRESERVE_CONTEXT = 0x2
KEXEC_SEGMENT_MAX = 0x10
KEYCTL_ASSUME_AUTHORITY = 0x10
KEYCTL_CHOWN = 0x4
KEYCTL_CLEAR = 0x7
@ -1811,6 +1832,7 @@ const (
SOL_TIPC = 0x10f
SOL_TLS = 0x11a
SOL_X25 = 0x106
SOL_XDP = 0x11b
SOMAXCONN = 0x80
SO_ACCEPTCONN = 0x1009
SO_ATTACH_BPF = 0x32
@ -2295,6 +2317,26 @@ const (
XATTR_CREATE = 0x1
XATTR_REPLACE = 0x2
XCASE = 0x4
XDP_COPY = 0x2
XDP_FLAGS_DRV_MODE = 0x4
XDP_FLAGS_HW_MODE = 0x8
XDP_FLAGS_MASK = 0xf
XDP_FLAGS_MODES = 0xe
XDP_FLAGS_SKB_MODE = 0x2
XDP_FLAGS_UPDATE_IF_NOEXIST = 0x1
XDP_MMAP_OFFSETS = 0x1
XDP_PGOFF_RX_RING = 0x0
XDP_PGOFF_TX_RING = 0x80000000
XDP_RX_RING = 0x2
XDP_SHARED_UMEM = 0x1
XDP_STATISTICS = 0x7
XDP_TX_RING = 0x3
XDP_UMEM_COMPLETION_RING = 0x6
XDP_UMEM_FILL_RING = 0x5
XDP_UMEM_PGOFF_COMPLETION_RING = 0x180000000
XDP_UMEM_PGOFF_FILL_RING = 0x100000000
XDP_UMEM_REG = 0x4
XDP_ZEROCOPY = 0x4
XENFS_SUPER_MAGIC = 0xabba1974
XTABS = 0x1800
ZSMALLOC_MAGIC = 0x58295829

View file

@ -64,6 +64,7 @@ const (
AF_VSOCK = 0x28
AF_WANPIPE = 0x19
AF_X25 = 0x9
AF_XDP = 0x2c
ALG_OP_DECRYPT = 0x0
ALG_OP_ENCRYPT = 0x1
ALG_SET_AEAD_ASSOCLEN = 0x4
@ -877,6 +878,26 @@ const (
IXOFF = 0x400
IXON = 0x200
JFFS2_SUPER_MAGIC = 0x72b6
KEXEC_ARCH_386 = 0x30000
KEXEC_ARCH_68K = 0x40000
KEXEC_ARCH_AARCH64 = 0xb70000
KEXEC_ARCH_ARM = 0x280000
KEXEC_ARCH_DEFAULT = 0x0
KEXEC_ARCH_IA_64 = 0x320000
KEXEC_ARCH_MASK = 0xffff0000
KEXEC_ARCH_MIPS = 0x80000
KEXEC_ARCH_MIPS_LE = 0xa0000
KEXEC_ARCH_PPC = 0x140000
KEXEC_ARCH_PPC64 = 0x150000
KEXEC_ARCH_S390 = 0x160000
KEXEC_ARCH_SH = 0x2a0000
KEXEC_ARCH_X86_64 = 0x3e0000
KEXEC_FILE_NO_INITRAMFS = 0x4
KEXEC_FILE_ON_CRASH = 0x2
KEXEC_FILE_UNLOAD = 0x1
KEXEC_ON_CRASH = 0x1
KEXEC_PRESERVE_CONTEXT = 0x2
KEXEC_SEGMENT_MAX = 0x10
KEYCTL_ASSUME_AUTHORITY = 0x10
KEYCTL_CHOWN = 0x4
KEYCTL_CLEAR = 0x7
@ -1867,6 +1888,7 @@ const (
SOL_TIPC = 0x10f
SOL_TLS = 0x11a
SOL_X25 = 0x106
SOL_XDP = 0x11b
SOMAXCONN = 0x80
SO_ACCEPTCONN = 0x1e
SO_ATTACH_BPF = 0x32
@ -2352,6 +2374,26 @@ const (
XATTR_CREATE = 0x1
XATTR_REPLACE = 0x2
XCASE = 0x4000
XDP_COPY = 0x2
XDP_FLAGS_DRV_MODE = 0x4
XDP_FLAGS_HW_MODE = 0x8
XDP_FLAGS_MASK = 0xf
XDP_FLAGS_MODES = 0xe
XDP_FLAGS_SKB_MODE = 0x2
XDP_FLAGS_UPDATE_IF_NOEXIST = 0x1
XDP_MMAP_OFFSETS = 0x1
XDP_PGOFF_RX_RING = 0x0
XDP_PGOFF_TX_RING = 0x80000000
XDP_RX_RING = 0x2
XDP_SHARED_UMEM = 0x1
XDP_STATISTICS = 0x7
XDP_TX_RING = 0x3
XDP_UMEM_COMPLETION_RING = 0x6
XDP_UMEM_FILL_RING = 0x5
XDP_UMEM_PGOFF_COMPLETION_RING = 0x180000000
XDP_UMEM_PGOFF_FILL_RING = 0x100000000
XDP_UMEM_REG = 0x4
XDP_ZEROCOPY = 0x4
XENFS_SUPER_MAGIC = 0xabba1974
XTABS = 0xc00
ZSMALLOC_MAGIC = 0x58295829

View file

@ -64,6 +64,7 @@ const (
AF_VSOCK = 0x28
AF_WANPIPE = 0x19
AF_X25 = 0x9
AF_XDP = 0x2c
ALG_OP_DECRYPT = 0x0
ALG_OP_ENCRYPT = 0x1
ALG_SET_AEAD_ASSOCLEN = 0x4
@ -877,6 +878,26 @@ const (
IXOFF = 0x400
IXON = 0x200
JFFS2_SUPER_MAGIC = 0x72b6
KEXEC_ARCH_386 = 0x30000
KEXEC_ARCH_68K = 0x40000
KEXEC_ARCH_AARCH64 = 0xb70000
KEXEC_ARCH_ARM = 0x280000
KEXEC_ARCH_DEFAULT = 0x0
KEXEC_ARCH_IA_64 = 0x320000
KEXEC_ARCH_MASK = 0xffff0000
KEXEC_ARCH_MIPS = 0x80000
KEXEC_ARCH_MIPS_LE = 0xa0000
KEXEC_ARCH_PPC = 0x140000
KEXEC_ARCH_PPC64 = 0x150000
KEXEC_ARCH_S390 = 0x160000
KEXEC_ARCH_SH = 0x2a0000
KEXEC_ARCH_X86_64 = 0x3e0000
KEXEC_FILE_NO_INITRAMFS = 0x4
KEXEC_FILE_ON_CRASH = 0x2
KEXEC_FILE_UNLOAD = 0x1
KEXEC_ON_CRASH = 0x1
KEXEC_PRESERVE_CONTEXT = 0x2
KEXEC_SEGMENT_MAX = 0x10
KEYCTL_ASSUME_AUTHORITY = 0x10
KEYCTL_CHOWN = 0x4
KEYCTL_CLEAR = 0x7
@ -1867,6 +1888,7 @@ const (
SOL_TIPC = 0x10f
SOL_TLS = 0x11a
SOL_X25 = 0x106
SOL_XDP = 0x11b
SOMAXCONN = 0x80
SO_ACCEPTCONN = 0x1e
SO_ATTACH_BPF = 0x32
@ -2352,6 +2374,26 @@ const (
XATTR_CREATE = 0x1
XATTR_REPLACE = 0x2
XCASE = 0x4000
XDP_COPY = 0x2
XDP_FLAGS_DRV_MODE = 0x4
XDP_FLAGS_HW_MODE = 0x8
XDP_FLAGS_MASK = 0xf
XDP_FLAGS_MODES = 0xe
XDP_FLAGS_SKB_MODE = 0x2
XDP_FLAGS_UPDATE_IF_NOEXIST = 0x1
XDP_MMAP_OFFSETS = 0x1
XDP_PGOFF_RX_RING = 0x0
XDP_PGOFF_TX_RING = 0x80000000
XDP_RX_RING = 0x2
XDP_SHARED_UMEM = 0x1
XDP_STATISTICS = 0x7
XDP_TX_RING = 0x3
XDP_UMEM_COMPLETION_RING = 0x6
XDP_UMEM_FILL_RING = 0x5
XDP_UMEM_PGOFF_COMPLETION_RING = 0x180000000
XDP_UMEM_PGOFF_FILL_RING = 0x100000000
XDP_UMEM_REG = 0x4
XDP_ZEROCOPY = 0x4
XENFS_SUPER_MAGIC = 0xabba1974
XTABS = 0xc00
ZSMALLOC_MAGIC = 0x58295829

View file

@ -64,6 +64,7 @@ const (
AF_VSOCK = 0x28
AF_WANPIPE = 0x19
AF_X25 = 0x9
AF_XDP = 0x2c
ALG_OP_DECRYPT = 0x0
ALG_OP_ENCRYPT = 0x1
ALG_SET_AEAD_ASSOCLEN = 0x4
@ -877,6 +878,26 @@ const (
IXOFF = 0x1000
IXON = 0x400
JFFS2_SUPER_MAGIC = 0x72b6
KEXEC_ARCH_386 = 0x30000
KEXEC_ARCH_68K = 0x40000
KEXEC_ARCH_AARCH64 = 0xb70000
KEXEC_ARCH_ARM = 0x280000
KEXEC_ARCH_DEFAULT = 0x0
KEXEC_ARCH_IA_64 = 0x320000
KEXEC_ARCH_MASK = 0xffff0000
KEXEC_ARCH_MIPS = 0x80000
KEXEC_ARCH_MIPS_LE = 0xa0000
KEXEC_ARCH_PPC = 0x140000
KEXEC_ARCH_PPC64 = 0x150000
KEXEC_ARCH_S390 = 0x160000
KEXEC_ARCH_SH = 0x2a0000
KEXEC_ARCH_X86_64 = 0x3e0000
KEXEC_FILE_NO_INITRAMFS = 0x4
KEXEC_FILE_ON_CRASH = 0x2
KEXEC_FILE_UNLOAD = 0x1
KEXEC_ON_CRASH = 0x1
KEXEC_PRESERVE_CONTEXT = 0x2
KEXEC_SEGMENT_MAX = 0x10
KEYCTL_ASSUME_AUTHORITY = 0x10
KEYCTL_CHOWN = 0x4
KEYCTL_CLEAR = 0x7
@ -1799,6 +1820,7 @@ const (
SOL_TIPC = 0x10f
SOL_TLS = 0x11a
SOL_X25 = 0x106
SOL_XDP = 0x11b
SOMAXCONN = 0x80
SO_ACCEPTCONN = 0x1e
SO_ATTACH_BPF = 0x32
@ -2280,6 +2302,26 @@ const (
XATTR_CREATE = 0x1
XATTR_REPLACE = 0x2
XCASE = 0x4
XDP_COPY = 0x2
XDP_FLAGS_DRV_MODE = 0x4
XDP_FLAGS_HW_MODE = 0x8
XDP_FLAGS_MASK = 0xf
XDP_FLAGS_MODES = 0xe
XDP_FLAGS_SKB_MODE = 0x2
XDP_FLAGS_UPDATE_IF_NOEXIST = 0x1
XDP_MMAP_OFFSETS = 0x1
XDP_PGOFF_RX_RING = 0x0
XDP_PGOFF_TX_RING = 0x80000000
XDP_RX_RING = 0x2
XDP_SHARED_UMEM = 0x1
XDP_STATISTICS = 0x7
XDP_TX_RING = 0x3
XDP_UMEM_COMPLETION_RING = 0x6
XDP_UMEM_FILL_RING = 0x5
XDP_UMEM_PGOFF_COMPLETION_RING = 0x180000000
XDP_UMEM_PGOFF_FILL_RING = 0x100000000
XDP_UMEM_REG = 0x4
XDP_ZEROCOPY = 0x4
XENFS_SUPER_MAGIC = 0xabba1974
XTABS = 0x1800
ZSMALLOC_MAGIC = 0x58295829

View file

@ -64,6 +64,7 @@ const (
AF_VSOCK = 0x28
AF_WANPIPE = 0x19
AF_X25 = 0x9
AF_XDP = 0x2c
ALG_OP_DECRYPT = 0x0
ALG_OP_ENCRYPT = 0x1
ALG_SET_AEAD_ASSOCLEN = 0x4
@ -877,6 +878,26 @@ const (
IXOFF = 0x1000
IXON = 0x400
JFFS2_SUPER_MAGIC = 0x72b6
KEXEC_ARCH_386 = 0x30000
KEXEC_ARCH_68K = 0x40000
KEXEC_ARCH_AARCH64 = 0xb70000
KEXEC_ARCH_ARM = 0x280000
KEXEC_ARCH_DEFAULT = 0x0
KEXEC_ARCH_IA_64 = 0x320000
KEXEC_ARCH_MASK = 0xffff0000
KEXEC_ARCH_MIPS = 0x80000
KEXEC_ARCH_MIPS_LE = 0xa0000
KEXEC_ARCH_PPC = 0x140000
KEXEC_ARCH_PPC64 = 0x150000
KEXEC_ARCH_S390 = 0x160000
KEXEC_ARCH_SH = 0x2a0000
KEXEC_ARCH_X86_64 = 0x3e0000
KEXEC_FILE_NO_INITRAMFS = 0x4
KEXEC_FILE_ON_CRASH = 0x2
KEXEC_FILE_UNLOAD = 0x1
KEXEC_ON_CRASH = 0x1
KEXEC_PRESERVE_CONTEXT = 0x2
KEXEC_SEGMENT_MAX = 0x10
KEYCTL_ASSUME_AUTHORITY = 0x10
KEYCTL_CHOWN = 0x4
KEYCTL_CLEAR = 0x7
@ -1872,6 +1893,7 @@ const (
SOL_TIPC = 0x10f
SOL_TLS = 0x11a
SOL_X25 = 0x106
SOL_XDP = 0x11b
SOMAXCONN = 0x80
SO_ACCEPTCONN = 0x1e
SO_ATTACH_BPF = 0x32
@ -2353,6 +2375,26 @@ const (
XATTR_CREATE = 0x1
XATTR_REPLACE = 0x2
XCASE = 0x4
XDP_COPY = 0x2
XDP_FLAGS_DRV_MODE = 0x4
XDP_FLAGS_HW_MODE = 0x8
XDP_FLAGS_MASK = 0xf
XDP_FLAGS_MODES = 0xe
XDP_FLAGS_SKB_MODE = 0x2
XDP_FLAGS_UPDATE_IF_NOEXIST = 0x1
XDP_MMAP_OFFSETS = 0x1
XDP_PGOFF_RX_RING = 0x0
XDP_PGOFF_TX_RING = 0x80000000
XDP_RX_RING = 0x2
XDP_SHARED_UMEM = 0x1
XDP_STATISTICS = 0x7
XDP_TX_RING = 0x3
XDP_UMEM_COMPLETION_RING = 0x6
XDP_UMEM_FILL_RING = 0x5
XDP_UMEM_PGOFF_COMPLETION_RING = 0x180000000
XDP_UMEM_PGOFF_FILL_RING = 0x100000000
XDP_UMEM_REG = 0x4
XDP_ZEROCOPY = 0x4
XENFS_SUPER_MAGIC = 0xabba1974
XTABS = 0x1800
ZSMALLOC_MAGIC = 0x58295829

View file

@ -1,10 +1,10 @@
// mkerrors.sh -m64
// Code generated by the command above; DO NOT EDIT.
// mkerrors.sh -Wall -Werror -static -I/tmp/include
// Code generated by the command above; see README.md. DO NOT EDIT.
// +build sparc64,linux
// Created by cgo -godefs - DO NOT EDIT
// cgo -godefs -- -m64 _const.go
// Code generated by cmd/cgo -godefs; DO NOT EDIT.
// cgo -godefs -- -Wall -Werror -static -I/tmp/include _const.go
package unix
@ -1969,174 +1969,182 @@ const (
)
// Error table
var errors = [...]string{
1: "operation not permitted",
2: "no such file or directory",
3: "no such process",
4: "interrupted system call",
5: "input/output error",
6: "no such device or address",
7: "argument list too long",
8: "exec format error",
9: "bad file descriptor",
10: "no child processes",
11: "resource temporarily unavailable",
12: "cannot allocate memory",
13: "permission denied",
14: "bad address",
15: "block device required",
16: "device or resource busy",
17: "file exists",
18: "invalid cross-device link",
19: "no such device",
20: "not a directory",
21: "is a directory",
22: "invalid argument",
23: "too many open files in system",
24: "too many open files",
25: "inappropriate ioctl for device",
26: "text file busy",
27: "file too large",
28: "no space left on device",
29: "illegal seek",
30: "read-only file system",
31: "too many links",
32: "broken pipe",
33: "numerical argument out of domain",
34: "numerical result out of range",
36: "operation now in progress",
37: "operation already in progress",
38: "socket operation on non-socket",
39: "destination address required",
40: "message too long",
41: "protocol wrong type for socket",
42: "protocol not available",
43: "protocol not supported",
44: "socket type not supported",
45: "operation not supported",
46: "protocol family not supported",
47: "address family not supported by protocol",
48: "address already in use",
49: "cannot assign requested address",
50: "network is down",
51: "network is unreachable",
52: "network dropped connection on reset",
53: "software caused connection abort",
54: "connection reset by peer",
55: "no buffer space available",
56: "transport endpoint is already connected",
57: "transport endpoint is not connected",
58: "cannot send after transport endpoint shutdown",
59: "too many references: cannot splice",
60: "connection timed out",
61: "connection refused",
62: "too many levels of symbolic links",
63: "file name too long",
64: "host is down",
65: "no route to host",
66: "directory not empty",
67: "too many processes",
68: "too many users",
69: "disk quota exceeded",
70: "stale file handle",
71: "object is remote",
72: "device not a stream",
73: "timer expired",
74: "out of streams resources",
75: "no message of desired type",
76: "bad message",
77: "identifier removed",
78: "resource deadlock avoided",
79: "no locks available",
80: "machine is not on the network",
81: "unknown error 81",
82: "link has been severed",
83: "advertise error",
84: "srmount error",
85: "communication error on send",
86: "protocol error",
87: "multihop attempted",
88: "RFS specific error",
89: "remote address changed",
90: "function not implemented",
91: "streams pipe error",
92: "value too large for defined data type",
93: "file descriptor in bad state",
94: "channel number out of range",
95: "level 2 not synchronized",
96: "level 3 halted",
97: "level 3 reset",
98: "link number out of range",
99: "protocol driver not attached",
100: "no CSI structure available",
101: "level 2 halted",
102: "invalid exchange",
103: "invalid request descriptor",
104: "exchange full",
105: "no anode",
106: "invalid request code",
107: "invalid slot",
108: "file locking deadlock error",
109: "bad font file format",
110: "cannot exec a shared library directly",
111: "no data available",
112: "accessing a corrupted shared library",
113: "package not installed",
114: "can not access a needed shared library",
115: "name not unique on network",
116: "interrupted system call should be restarted",
117: "structure needs cleaning",
118: "not a XENIX named type file",
119: "no XENIX semaphores available",
120: "is a named type file",
121: "remote I/O error",
122: "invalid or incomplete multibyte or wide character",
123: "attempting to link in too many shared libraries",
124: ".lib section in a.out corrupted",
125: "no medium found",
126: "wrong medium type",
127: "operation canceled",
128: "required key not available",
129: "key has expired",
130: "key has been revoked",
131: "key was rejected by service",
132: "owner died",
133: "state not recoverable",
134: "operation not possible due to RF-kill",
135: "memory page has hardware error",
var errorList = [...]struct {
num syscall.Errno
name string
desc string
}{
{1, "EPERM", "operation not permitted"},
{2, "ENOENT", "no such file or directory"},
{3, "ESRCH", "no such process"},
{4, "EINTR", "interrupted system call"},
{5, "EIO", "input/output error"},
{6, "ENXIO", "no such device or address"},
{7, "E2BIG", "argument list too long"},
{8, "ENOEXEC", "exec format error"},
{9, "EBADF", "bad file descriptor"},
{10, "ECHILD", "no child processes"},
{11, "EAGAIN", "resource temporarily unavailable"},
{12, "ENOMEM", "cannot allocate memory"},
{13, "EACCES", "permission denied"},
{14, "EFAULT", "bad address"},
{15, "ENOTBLK", "block device required"},
{16, "EBUSY", "device or resource busy"},
{17, "EEXIST", "file exists"},
{18, "EXDEV", "invalid cross-device link"},
{19, "ENODEV", "no such device"},
{20, "ENOTDIR", "not a directory"},
{21, "EISDIR", "is a directory"},
{22, "EINVAL", "invalid argument"},
{23, "ENFILE", "too many open files in system"},
{24, "EMFILE", "too many open files"},
{25, "ENOTTY", "inappropriate ioctl for device"},
{26, "ETXTBSY", "text file busy"},
{27, "EFBIG", "file too large"},
{28, "ENOSPC", "no space left on device"},
{29, "ESPIPE", "illegal seek"},
{30, "EROFS", "read-only file system"},
{31, "EMLINK", "too many links"},
{32, "EPIPE", "broken pipe"},
{33, "EDOM", "numerical argument out of domain"},
{34, "ERANGE", "numerical result out of range"},
{36, "EINPROGRESS", "operation now in progress"},
{37, "EALREADY", "operation already in progress"},
{38, "ENOTSOCK", "socket operation on non-socket"},
{39, "EDESTADDRREQ", "destination address required"},
{40, "EMSGSIZE", "message too long"},
{41, "EPROTOTYPE", "protocol wrong type for socket"},
{42, "ENOPROTOOPT", "protocol not available"},
{43, "EPROTONOSUPPORT", "protocol not supported"},
{44, "ESOCKTNOSUPPORT", "socket type not supported"},
{45, "ENOTSUP", "operation not supported"},
{46, "EPFNOSUPPORT", "protocol family not supported"},
{47, "EAFNOSUPPORT", "address family not supported by protocol"},
{48, "EADDRINUSE", "address already in use"},
{49, "EADDRNOTAVAIL", "cannot assign requested address"},
{50, "ENETDOWN", "network is down"},
{51, "ENETUNREACH", "network is unreachable"},
{52, "ENETRESET", "network dropped connection on reset"},
{53, "ECONNABORTED", "software caused connection abort"},
{54, "ECONNRESET", "connection reset by peer"},
{55, "ENOBUFS", "no buffer space available"},
{56, "EISCONN", "transport endpoint is already connected"},
{57, "ENOTCONN", "transport endpoint is not connected"},
{58, "ESHUTDOWN", "cannot send after transport endpoint shutdown"},
{59, "ETOOMANYREFS", "too many references: cannot splice"},
{60, "ETIMEDOUT", "connection timed out"},
{61, "ECONNREFUSED", "connection refused"},
{62, "ELOOP", "too many levels of symbolic links"},
{63, "ENAMETOOLONG", "file name too long"},
{64, "EHOSTDOWN", "host is down"},
{65, "EHOSTUNREACH", "no route to host"},
{66, "ENOTEMPTY", "directory not empty"},
{67, "EPROCLIM", "too many processes"},
{68, "EUSERS", "too many users"},
{69, "EDQUOT", "disk quota exceeded"},
{70, "ESTALE", "stale file handle"},
{71, "EREMOTE", "object is remote"},
{72, "ENOSTR", "device not a stream"},
{73, "ETIME", "timer expired"},
{74, "ENOSR", "out of streams resources"},
{75, "ENOMSG", "no message of desired type"},
{76, "EBADMSG", "bad message"},
{77, "EIDRM", "identifier removed"},
{78, "EDEADLK", "resource deadlock avoided"},
{79, "ENOLCK", "no locks available"},
{80, "ENONET", "machine is not on the network"},
{81, "ERREMOTE", "unknown error 81"},
{82, "ENOLINK", "link has been severed"},
{83, "EADV", "advertise error"},
{84, "ESRMNT", "srmount error"},
{85, "ECOMM", "communication error on send"},
{86, "EPROTO", "protocol error"},
{87, "EMULTIHOP", "multihop attempted"},
{88, "EDOTDOT", "RFS specific error"},
{89, "EREMCHG", "remote address changed"},
{90, "ENOSYS", "function not implemented"},
{91, "ESTRPIPE", "streams pipe error"},
{92, "EOVERFLOW", "value too large for defined data type"},
{93, "EBADFD", "file descriptor in bad state"},
{94, "ECHRNG", "channel number out of range"},
{95, "EL2NSYNC", "level 2 not synchronized"},
{96, "EL3HLT", "level 3 halted"},
{97, "EL3RST", "level 3 reset"},
{98, "ELNRNG", "link number out of range"},
{99, "EUNATCH", "protocol driver not attached"},
{100, "ENOCSI", "no CSI structure available"},
{101, "EL2HLT", "level 2 halted"},
{102, "EBADE", "invalid exchange"},
{103, "EBADR", "invalid request descriptor"},
{104, "EXFULL", "exchange full"},
{105, "ENOANO", "no anode"},
{106, "EBADRQC", "invalid request code"},
{107, "EBADSLT", "invalid slot"},
{108, "EDEADLOCK", "file locking deadlock error"},
{109, "EBFONT", "bad font file format"},
{110, "ELIBEXEC", "cannot exec a shared library directly"},
{111, "ENODATA", "no data available"},
{112, "ELIBBAD", "accessing a corrupted shared library"},
{113, "ENOPKG", "package not installed"},
{114, "ELIBACC", "can not access a needed shared library"},
{115, "ENOTUNIQ", "name not unique on network"},
{116, "ERESTART", "interrupted system call should be restarted"},
{117, "EUCLEAN", "structure needs cleaning"},
{118, "ENOTNAM", "not a XENIX named type file"},
{119, "ENAVAIL", "no XENIX semaphores available"},
{120, "EISNAM", "is a named type file"},
{121, "EREMOTEIO", "remote I/O error"},
{122, "EILSEQ", "invalid or incomplete multibyte or wide character"},
{123, "ELIBMAX", "attempting to link in too many shared libraries"},
{124, "ELIBSCN", ".lib section in a.out corrupted"},
{125, "ENOMEDIUM", "no medium found"},
{126, "EMEDIUMTYPE", "wrong medium type"},
{127, "ECANCELED", "operation canceled"},
{128, "ENOKEY", "required key not available"},
{129, "EKEYEXPIRED", "key has expired"},
{130, "EKEYREVOKED", "key has been revoked"},
{131, "EKEYREJECTED", "key was rejected by service"},
{132, "EOWNERDEAD", "owner died"},
{133, "ENOTRECOVERABLE", "state not recoverable"},
{134, "ERFKILL", "operation not possible due to RF-kill"},
{135, "EHWPOISON", "memory page has hardware error"},
}
// Signal table
var signals = [...]string{
1: "hangup",
2: "interrupt",
3: "quit",
4: "illegal instruction",
5: "trace/breakpoint trap",
6: "aborted",
7: "EMT trap",
8: "floating point exception",
9: "killed",
10: "bus error",
11: "segmentation fault",
12: "bad system call",
13: "broken pipe",
14: "alarm clock",
15: "terminated",
16: "urgent I/O condition",
17: "stopped (signal)",
18: "stopped",
19: "continued",
20: "child exited",
21: "stopped (tty input)",
22: "stopped (tty output)",
23: "I/O possible",
24: "CPU time limit exceeded",
25: "file size limit exceeded",
26: "virtual timer expired",
27: "profiling timer expired",
28: "window changed",
29: "resource lost",
30: "user defined signal 1",
31: "user defined signal 2",
var signalList = [...]struct {
num syscall.Signal
name string
desc string
}{
{1, "SIGHUP", "hangup"},
{2, "SIGINT", "interrupt"},
{3, "SIGQUIT", "quit"},
{4, "SIGILL", "illegal instruction"},
{5, "SIGTRAP", "trace/breakpoint trap"},
{6, "SIGABRT", "aborted"},
{7, "SIGEMT", "EMT trap"},
{8, "SIGFPE", "floating point exception"},
{9, "SIGKILL", "killed"},
{10, "SIGBUS", "bus error"},
{11, "SIGSEGV", "segmentation fault"},
{12, "SIGSYS", "bad system call"},
{13, "SIGPIPE", "broken pipe"},
{14, "SIGALRM", "alarm clock"},
{15, "SIGTERM", "terminated"},
{16, "SIGURG", "urgent I/O condition"},
{17, "SIGSTOP", "stopped (signal)"},
{18, "SIGTSTP", "stopped"},
{19, "SIGCONT", "continued"},
{20, "SIGCHLD", "child exited"},
{21, "SIGTTIN", "stopped (tty input)"},
{22, "SIGTTOU", "stopped (tty output)"},
{23, "SIGIO", "I/O possible"},
{24, "SIGXCPU", "CPU time limit exceeded"},
{25, "SIGXFSZ", "file size limit exceeded"},
{26, "SIGVTALRM", "virtual timer expired"},
{27, "SIGPROF", "profiling timer expired"},
{28, "SIGWINCH", "window changed"},
{29, "SIGLOST", "power failure"},
{30, "SIGUSR1", "user defined signal 1"},
{31, "SIGUSR2", "user defined signal 2"},
}

View file

@ -1020,6 +1020,43 @@ const (
MAP_WIRED = 0x800
MCL_CURRENT = 0x1
MCL_FUTURE = 0x2
MNT_ASYNC = 0x40
MNT_BASIC_FLAGS = 0xe782807f
MNT_DEFEXPORTED = 0x200
MNT_DISCARD = 0x800000
MNT_EXKERB = 0x800
MNT_EXNORESPORT = 0x8000000
MNT_EXPORTANON = 0x400
MNT_EXPORTED = 0x100
MNT_EXPUBLIC = 0x10000000
MNT_EXRDONLY = 0x80
MNT_EXTATTR = 0x1000000
MNT_FORCE = 0x80000
MNT_GETARGS = 0x400000
MNT_IGNORE = 0x100000
MNT_LAZY = 0x3
MNT_LOCAL = 0x1000
MNT_LOG = 0x2000000
MNT_NOATIME = 0x4000000
MNT_NOCOREDUMP = 0x8000
MNT_NODEV = 0x10
MNT_NODEVMTIME = 0x40000000
MNT_NOEXEC = 0x4
MNT_NOSUID = 0x8
MNT_NOWAIT = 0x2
MNT_OP_FLAGS = 0x4d0000
MNT_QUOTA = 0x2000
MNT_RDONLY = 0x1
MNT_RELATIME = 0x20000
MNT_RELOAD = 0x40000
MNT_ROOTFS = 0x4000
MNT_SOFTDEP = 0x80000000
MNT_SYMPERM = 0x20000000
MNT_SYNCHRONOUS = 0x2
MNT_UNION = 0x20
MNT_UPDATE = 0x10000
MNT_VISFLAGMASK = 0xff90ffff
MNT_WAIT = 0x1
MSG_BCAST = 0x100
MSG_CMSG_CLOEXEC = 0x800
MSG_CONTROLMBUF = 0x2000000
@ -1113,7 +1150,10 @@ const (
RLIMIT_CPU = 0x0
RLIMIT_DATA = 0x2
RLIMIT_FSIZE = 0x1
RLIMIT_MEMLOCK = 0x6
RLIMIT_NOFILE = 0x8
RLIMIT_NPROC = 0x7
RLIMIT_RSS = 0x5
RLIMIT_STACK = 0x3
RLIM_INFINITY = 0x7fffffffffffffff
RTAX_AUTHOR = 0x6

View file

@ -1010,6 +1010,43 @@ const (
MAP_WIRED = 0x800
MCL_CURRENT = 0x1
MCL_FUTURE = 0x2
MNT_ASYNC = 0x40
MNT_BASIC_FLAGS = 0xe782807f
MNT_DEFEXPORTED = 0x200
MNT_DISCARD = 0x800000
MNT_EXKERB = 0x800
MNT_EXNORESPORT = 0x8000000
MNT_EXPORTANON = 0x400
MNT_EXPORTED = 0x100
MNT_EXPUBLIC = 0x10000000
MNT_EXRDONLY = 0x80
MNT_EXTATTR = 0x1000000
MNT_FORCE = 0x80000
MNT_GETARGS = 0x400000
MNT_IGNORE = 0x100000
MNT_LAZY = 0x3
MNT_LOCAL = 0x1000
MNT_LOG = 0x2000000
MNT_NOATIME = 0x4000000
MNT_NOCOREDUMP = 0x8000
MNT_NODEV = 0x10
MNT_NODEVMTIME = 0x40000000
MNT_NOEXEC = 0x4
MNT_NOSUID = 0x8
MNT_NOWAIT = 0x2
MNT_OP_FLAGS = 0x4d0000
MNT_QUOTA = 0x2000
MNT_RDONLY = 0x1
MNT_RELATIME = 0x20000
MNT_RELOAD = 0x40000
MNT_ROOTFS = 0x4000
MNT_SOFTDEP = 0x80000000
MNT_SYMPERM = 0x20000000
MNT_SYNCHRONOUS = 0x2
MNT_UNION = 0x20
MNT_UPDATE = 0x10000
MNT_VISFLAGMASK = 0xff90ffff
MNT_WAIT = 0x1
MSG_BCAST = 0x100
MSG_CMSG_CLOEXEC = 0x800
MSG_CONTROLMBUF = 0x2000000
@ -1103,7 +1140,10 @@ const (
RLIMIT_CPU = 0x0
RLIMIT_DATA = 0x2
RLIMIT_FSIZE = 0x1
RLIMIT_MEMLOCK = 0x6
RLIMIT_NOFILE = 0x8
RLIMIT_NPROC = 0x7
RLIMIT_RSS = 0x5
RLIMIT_STACK = 0x3
RLIM_INFINITY = 0x7fffffffffffffff
RTAX_AUTHOR = 0x6

View file

@ -1000,6 +1000,43 @@ const (
MAP_STACK = 0x2000
MAP_TRYFIXED = 0x400
MAP_WIRED = 0x800
MNT_ASYNC = 0x40
MNT_BASIC_FLAGS = 0xe782807f
MNT_DEFEXPORTED = 0x200
MNT_DISCARD = 0x800000
MNT_EXKERB = 0x800
MNT_EXNORESPORT = 0x8000000
MNT_EXPORTANON = 0x400
MNT_EXPORTED = 0x100
MNT_EXPUBLIC = 0x10000000
MNT_EXRDONLY = 0x80
MNT_EXTATTR = 0x1000000
MNT_FORCE = 0x80000
MNT_GETARGS = 0x400000
MNT_IGNORE = 0x100000
MNT_LAZY = 0x3
MNT_LOCAL = 0x1000
MNT_LOG = 0x2000000
MNT_NOATIME = 0x4000000
MNT_NOCOREDUMP = 0x8000
MNT_NODEV = 0x10
MNT_NODEVMTIME = 0x40000000
MNT_NOEXEC = 0x4
MNT_NOSUID = 0x8
MNT_NOWAIT = 0x2
MNT_OP_FLAGS = 0x4d0000
MNT_QUOTA = 0x2000
MNT_RDONLY = 0x1
MNT_RELATIME = 0x20000
MNT_RELOAD = 0x40000
MNT_ROOTFS = 0x4000
MNT_SOFTDEP = 0x80000000
MNT_SYMPERM = 0x20000000
MNT_SYNCHRONOUS = 0x2
MNT_UNION = 0x20
MNT_UPDATE = 0x10000
MNT_VISFLAGMASK = 0xff90ffff
MNT_WAIT = 0x1
MSG_BCAST = 0x100
MSG_CMSG_CLOEXEC = 0x800
MSG_CONTROLMBUF = 0x2000000
@ -1093,7 +1130,10 @@ const (
RLIMIT_CPU = 0x0
RLIMIT_DATA = 0x2
RLIMIT_FSIZE = 0x1
RLIMIT_MEMLOCK = 0x6
RLIMIT_NOFILE = 0x8
RLIMIT_NPROC = 0x7
RLIMIT_RSS = 0x5
RLIMIT_STACK = 0x3
RLIM_INFINITY = 0x7fffffffffffffff
RTAX_AUTHOR = 0x6

View file

@ -899,6 +899,32 @@ const (
MAP_TRYFIXED = 0x400
MCL_CURRENT = 0x1
MCL_FUTURE = 0x2
MNT_ASYNC = 0x40
MNT_DEFEXPORTED = 0x200
MNT_DELEXPORT = 0x20000
MNT_DOOMED = 0x8000000
MNT_EXPORTANON = 0x400
MNT_EXPORTED = 0x100
MNT_EXRDONLY = 0x80
MNT_FORCE = 0x80000
MNT_LAZY = 0x3
MNT_LOCAL = 0x1000
MNT_NOATIME = 0x8000
MNT_NODEV = 0x10
MNT_NOEXEC = 0x4
MNT_NOSUID = 0x8
MNT_NOWAIT = 0x2
MNT_QUOTA = 0x2000
MNT_RDONLY = 0x1
MNT_RELOAD = 0x40000
MNT_ROOTFS = 0x4000
MNT_SOFTDEP = 0x4000000
MNT_SYNCHRONOUS = 0x2
MNT_UPDATE = 0x10000
MNT_VISFLAGMASK = 0x400ffff
MNT_WAIT = 0x1
MNT_WANTRDWR = 0x2000000
MNT_WXALLOWED = 0x800
MSG_BCAST = 0x100
MSG_CTRUNC = 0x20
MSG_DONTROUTE = 0x4

View file

@ -939,6 +939,34 @@ const (
MAP_TRYFIXED = 0x0
MCL_CURRENT = 0x1
MCL_FUTURE = 0x2
MNT_ASYNC = 0x40
MNT_DEFEXPORTED = 0x200
MNT_DELEXPORT = 0x20000
MNT_DOOMED = 0x8000000
MNT_EXPORTANON = 0x400
MNT_EXPORTED = 0x100
MNT_EXRDONLY = 0x80
MNT_FORCE = 0x80000
MNT_LAZY = 0x3
MNT_LOCAL = 0x1000
MNT_NOATIME = 0x8000
MNT_NODEV = 0x10
MNT_NOEXEC = 0x4
MNT_NOPERM = 0x20
MNT_NOSUID = 0x8
MNT_NOWAIT = 0x2
MNT_QUOTA = 0x2000
MNT_RDONLY = 0x1
MNT_RELOAD = 0x40000
MNT_ROOTFS = 0x4000
MNT_SOFTDEP = 0x4000000
MNT_STALLED = 0x100000
MNT_SYNCHRONOUS = 0x2
MNT_UPDATE = 0x10000
MNT_VISFLAGMASK = 0x400ffff
MNT_WAIT = 0x1
MNT_WANTRDWR = 0x2000000
MNT_WXALLOWED = 0x800
MSG_BCAST = 0x100
MSG_CMSG_CLOEXEC = 0x800
MSG_CTRUNC = 0x20
@ -1415,6 +1443,8 @@ const (
TIOCUCNTL_CBRK = 0x7a
TIOCUCNTL_SBRK = 0x7b
TOSTOP = 0x400000
UTIME_NOW = -0x2
UTIME_OMIT = -0x1
VDISCARD = 0xf
VDSUSP = 0xb
VEOF = 0x0

View file

@ -899,6 +899,32 @@ const (
MAP_TRYFIXED = 0x0
MCL_CURRENT = 0x1
MCL_FUTURE = 0x2
MNT_ASYNC = 0x40
MNT_DEFEXPORTED = 0x200
MNT_DELEXPORT = 0x20000
MNT_DOOMED = 0x8000000
MNT_EXPORTANON = 0x400
MNT_EXPORTED = 0x100
MNT_EXRDONLY = 0x80
MNT_FORCE = 0x80000
MNT_LAZY = 0x3
MNT_LOCAL = 0x1000
MNT_NOATIME = 0x8000
MNT_NODEV = 0x10
MNT_NOEXEC = 0x4
MNT_NOSUID = 0x8
MNT_NOWAIT = 0x2
MNT_QUOTA = 0x2000
MNT_RDONLY = 0x1
MNT_RELOAD = 0x40000
MNT_ROOTFS = 0x4000
MNT_SOFTDEP = 0x4000000
MNT_SYNCHRONOUS = 0x2
MNT_UPDATE = 0x10000
MNT_VISFLAGMASK = 0x400ffff
MNT_WAIT = 0x1
MNT_WANTRDWR = 0x2000000
MNT_WXALLOWED = 0x800
MSG_BCAST = 0x100
MSG_CMSG_CLOEXEC = 0x800
MSG_CTRUNC = 0x20

View file

@ -39,8 +39,10 @@ int getrusage(int, uintptr_t);
int getsid(int);
int kill(int, int);
int syslog(int, uintptr_t, size_t);
int mkdir(int, uintptr_t, unsigned int);
int mkdirat(int, uintptr_t, unsigned int);
int mkfifo(uintptr_t, unsigned int);
int mknod(uintptr_t, unsigned int, int);
int mknodat(int, uintptr_t, unsigned int, int);
int nanosleep(uintptr_t, uintptr_t);
int open64(uintptr_t, int, unsigned int);
@ -502,6 +504,17 @@ func Klogctl(typ int, buf []byte) (n int, err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Mkdir(dirfd int, path string, mode uint32) (err error) {
_p0 := uintptr(unsafe.Pointer(C.CString(path)))
r0, er := C.mkdir(C.int(dirfd), C.uintptr_t(_p0), C.uint(mode))
if r0 == -1 && er != nil {
err = er
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Mkdirat(dirfd int, path string, mode uint32) (err error) {
_p0 := uintptr(unsafe.Pointer(C.CString(path)))
r0, er := C.mkdirat(C.int(dirfd), C.uintptr_t(_p0), C.uint(mode))
@ -524,6 +537,17 @@ func Mkfifo(path string, mode uint32) (err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Mknod(path string, mode uint32, dev int) (err error) {
_p0 := uintptr(unsafe.Pointer(C.CString(path)))
r0, er := C.mknod(C.uintptr_t(_p0), C.uint(mode), C.int(dev))
if r0 == -1 && er != nil {
err = er
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Mknodat(dirfd int, path string, mode uint32, dev int) (err error) {
_p0 := uintptr(unsafe.Pointer(C.CString(path)))
r0, er := C.mknodat(C.int(dirfd), C.uintptr_t(_p0), C.uint(mode), C.int(dev))

View file

@ -39,8 +39,10 @@ int getrusage(int, uintptr_t);
int getsid(int);
int kill(int, int);
int syslog(int, uintptr_t, size_t);
int mkdir(int, uintptr_t, unsigned int);
int mkdirat(int, uintptr_t, unsigned int);
int mkfifo(uintptr_t, unsigned int);
int mknod(uintptr_t, unsigned int, int);
int mknodat(int, uintptr_t, unsigned int, int);
int nanosleep(uintptr_t, uintptr_t);
int open64(uintptr_t, int, unsigned int);
@ -502,6 +504,17 @@ func Klogctl(typ int, buf []byte) (n int, err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Mkdir(dirfd int, path string, mode uint32) (err error) {
_p0 := uintptr(unsafe.Pointer(C.CString(path)))
r0, er := C.mkdir(C.int(dirfd), C.uintptr_t(_p0), C.uint(mode))
if r0 == -1 && er != nil {
err = er
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Mkdirat(dirfd int, path string, mode uint32) (err error) {
_p0 := uintptr(unsafe.Pointer(C.CString(path)))
r0, er := C.mkdirat(C.int(dirfd), C.uintptr_t(_p0), C.uint(mode))
@ -524,6 +537,17 @@ func Mkfifo(path string, mode uint32) (err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Mknod(path string, mode uint32, dev int) (err error) {
_p0 := uintptr(unsafe.Pointer(C.CString(path)))
r0, er := C.mknod(C.uintptr_t(_p0), C.uint(mode), C.int(dev))
if r0 == -1 && er != nil {
err = er
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Mknodat(dirfd int, path string, mode uint32, dev int) (err error) {
_p0 := uintptr(unsafe.Pointer(C.CString(path)))
r0, er := C.mknodat(C.int(dirfd), C.uintptr_t(_p0), C.uint(mode), C.int(dev))

View file

@ -417,6 +417,16 @@ func Chroot(path string) (err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func ClockGetres(clockid int32, res *Timespec) (err error) {
_, _, e1 := Syscall(SYS_CLOCK_GETRES, uintptr(clockid), uintptr(unsafe.Pointer(res)), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func ClockGettime(clockid int32, time *Timespec) (err error) {
_, _, e1 := Syscall(SYS_CLOCK_GETTIME, uintptr(clockid), uintptr(unsafe.Pointer(time)), 0)
if e1 != 0 {

View file

@ -417,6 +417,16 @@ func Chroot(path string) (err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func ClockGetres(clockid int32, res *Timespec) (err error) {
_, _, e1 := Syscall(SYS_CLOCK_GETRES, uintptr(clockid), uintptr(unsafe.Pointer(res)), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func ClockGettime(clockid int32, time *Timespec) (err error) {
_, _, e1 := Syscall(SYS_CLOCK_GETTIME, uintptr(clockid), uintptr(unsafe.Pointer(time)), 0)
if e1 != 0 {
@ -2286,3 +2296,18 @@ func poll(fds *PollFd, nfds int, timeout int) (n int, err error) {
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func kexecFileLoad(kernelFd int, initrdFd int, cmdlineLen int, cmdline string, flags int) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(cmdline)
if err != nil {
return
}
_, _, e1 := Syscall6(SYS_KEXEC_FILE_LOAD, uintptr(kernelFd), uintptr(initrdFd), uintptr(cmdlineLen), uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}

View file

@ -417,6 +417,16 @@ func Chroot(path string) (err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func ClockGetres(clockid int32, res *Timespec) (err error) {
_, _, e1 := Syscall(SYS_CLOCK_GETRES, uintptr(clockid), uintptr(unsafe.Pointer(res)), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func ClockGettime(clockid int32, time *Timespec) (err error) {
_, _, e1 := Syscall(SYS_CLOCK_GETTIME, uintptr(clockid), uintptr(unsafe.Pointer(time)), 0)
if e1 != 0 {

View file

@ -417,6 +417,16 @@ func Chroot(path string) (err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func ClockGetres(clockid int32, res *Timespec) (err error) {
_, _, e1 := Syscall(SYS_CLOCK_GETRES, uintptr(clockid), uintptr(unsafe.Pointer(res)), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func ClockGettime(clockid int32, time *Timespec) (err error) {
_, _, e1 := Syscall(SYS_CLOCK_GETTIME, uintptr(clockid), uintptr(unsafe.Pointer(time)), 0)
if e1 != 0 {

View file

@ -417,6 +417,16 @@ func Chroot(path string) (err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func ClockGetres(clockid int32, res *Timespec) (err error) {
_, _, e1 := Syscall(SYS_CLOCK_GETRES, uintptr(clockid), uintptr(unsafe.Pointer(res)), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func ClockGettime(clockid int32, time *Timespec) (err error) {
_, _, e1 := Syscall(SYS_CLOCK_GETTIME, uintptr(clockid), uintptr(unsafe.Pointer(time)), 0)
if e1 != 0 {

View file

@ -417,6 +417,16 @@ func Chroot(path string) (err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func ClockGetres(clockid int32, res *Timespec) (err error) {
_, _, e1 := Syscall(SYS_CLOCK_GETRES, uintptr(clockid), uintptr(unsafe.Pointer(res)), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func ClockGettime(clockid int32, time *Timespec) (err error) {
_, _, e1 := Syscall(SYS_CLOCK_GETTIME, uintptr(clockid), uintptr(unsafe.Pointer(time)), 0)
if e1 != 0 {


@@ -417,6 +417,16 @@ func Chroot(path string) (err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func ClockGetres(clockid int32, res *Timespec) (err error) {
_, _, e1 := Syscall(SYS_CLOCK_GETRES, uintptr(clockid), uintptr(unsafe.Pointer(res)), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func ClockGettime(clockid int32, time *Timespec) (err error) {
_, _, e1 := Syscall(SYS_CLOCK_GETTIME, uintptr(clockid), uintptr(unsafe.Pointer(time)), 0)
if e1 != 0 {


@@ -417,6 +417,16 @@ func Chroot(path string) (err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func ClockGetres(clockid int32, res *Timespec) (err error) {
_, _, e1 := Syscall(SYS_CLOCK_GETRES, uintptr(clockid), uintptr(unsafe.Pointer(res)), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func ClockGettime(clockid int32, time *Timespec) (err error) {
_, _, e1 := Syscall(SYS_CLOCK_GETTIME, uintptr(clockid), uintptr(unsafe.Pointer(time)), 0)
if e1 != 0 {


@@ -417,6 +417,16 @@ func Chroot(path string) (err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func ClockGetres(clockid int32, res *Timespec) (err error) {
_, _, e1 := Syscall(SYS_CLOCK_GETRES, uintptr(clockid), uintptr(unsafe.Pointer(res)), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func ClockGettime(clockid int32, time *Timespec) (err error) {
_, _, e1 := Syscall(SYS_CLOCK_GETTIME, uintptr(clockid), uintptr(unsafe.Pointer(time)), 0)
if e1 != 0 {
@@ -979,7 +989,7 @@ func Lsetxattr(path string, attr string, data []byte, flags int) (err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-func MemfdCreate(name string, flags uint) (fd int, err error) {
+func MemfdCreate(name string, flags int) (fd int, err error) {
var _p0 *byte
_p0, err = BytePtrFromString(name)
if err != nil {
@@ -2333,3 +2343,18 @@ func syncFileRange2(fd int, flags int, off int64, n int64) (err error) {
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func kexecFileLoad(kernelFd int, initrdFd int, cmdlineLen int, cmdline string, flags int) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(cmdline)
if err != nil {
return
}
_, _, e1 := Syscall6(SYS_KEXEC_FILE_LOAD, uintptr(kernelFd), uintptr(initrdFd), uintptr(cmdlineLen), uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
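Two changes in this file are worth noting: MemfdCreate's flags parameter changes from uint to int (matching the other wrappers), and a kexecFileLoad wrapper is added. A short usage sketch for the former (Linux only, kernel 3.17 or newer; MFD_CLOEXEC comes from the same package; illustrative, not part of the diff):

package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	// With this update the flags argument is a plain int.
	fd, err := unix.MemfdCreate("scratch", unix.MFD_CLOEXEC)
	if err != nil {
		fmt.Println("memfd_create failed:", err)
		return
	}
	defer unix.Close(fd)
	fmt.Println("anonymous in-memory file created, fd =", fd)
}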


@@ -417,6 +417,16 @@ func Chroot(path string) (err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func ClockGetres(clockid int32, res *Timespec) (err error) {
_, _, e1 := Syscall(SYS_CLOCK_GETRES, uintptr(clockid), uintptr(unsafe.Pointer(res)), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func ClockGettime(clockid int32, time *Timespec) (err error) {
_, _, e1 := Syscall(SYS_CLOCK_GETTIME, uintptr(clockid), uintptr(unsafe.Pointer(time)), 0)
if e1 != 0 {
@@ -979,7 +989,7 @@ func Lsetxattr(path string, attr string, data []byte, flags int) (err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-func MemfdCreate(name string, flags uint) (fd int, err error) {
+func MemfdCreate(name string, flags int) (fd int, err error) {
var _p0 *byte
_p0, err = BytePtrFromString(name)
if err != nil {
@@ -2333,3 +2343,18 @@ func syncFileRange2(fd int, flags int, off int64, n int64) (err error) {
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func kexecFileLoad(kernelFd int, initrdFd int, cmdlineLen int, cmdline string, flags int) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(cmdline)
if err != nil {
return
}
_, _, e1 := Syscall6(SYS_KEXEC_FILE_LOAD, uintptr(kernelFd), uintptr(initrdFd), uintptr(cmdlineLen), uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}


@@ -417,6 +417,16 @@ func Chroot(path string) (err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func ClockGetres(clockid int32, res *Timespec) (err error) {
_, _, e1 := Syscall(SYS_CLOCK_GETRES, uintptr(clockid), uintptr(unsafe.Pointer(res)), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func ClockGettime(clockid int32, time *Timespec) (err error) {
_, _, e1 := Syscall(SYS_CLOCK_GETTIME, uintptr(clockid), uintptr(unsafe.Pointer(time)), 0)
if e1 != 0 {
@@ -979,7 +989,7 @@ func Lsetxattr(path string, attr string, data []byte, flags int) (err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-func MemfdCreate(name string, flags uint) (fd int, err error) {
+func MemfdCreate(name string, flags int) (fd int, err error) {
var _p0 *byte
_p0, err = BytePtrFromString(name)
if err != nil {


@@ -417,6 +417,16 @@ func Chroot(path string) (err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func ClockGetres(clockid int32, res *Timespec) (err error) {
_, _, e1 := Syscall(SYS_CLOCK_GETRES, uintptr(clockid), uintptr(unsafe.Pointer(res)), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func ClockGettime(clockid int32, time *Timespec) (err error) {
_, _, e1 := Syscall(SYS_CLOCK_GETTIME, uintptr(clockid), uintptr(unsafe.Pointer(time)), 0)
if e1 != 0 {
@@ -979,7 +989,7 @@ func Lsetxattr(path string, attr string, data []byte, flags int) (err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-func MemfdCreate(name string, flags uint) (fd int, err error) {
+func MemfdCreate(name string, flags int) (fd int, err error) {
var _p0 *byte
_p0, err = BytePtrFromString(name)
if err != nil {
@@ -2103,3 +2113,18 @@ func poll(fds *PollFd, nfds int, timeout int) (n int, err error) {
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func kexecFileLoad(kernelFd int, initrdFd int, cmdlineLen int, cmdline string, flags int) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(cmdline)
if err != nil {
return
}
_, _, e1 := Syscall6(SYS_KEXEC_FILE_LOAD, uintptr(kernelFd), uintptr(initrdFd), uintptr(cmdlineLen), uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}

File diff suppressed because it is too large


@@ -254,4 +254,17 @@ var sysctlMib = []mibentry{
{"net.mpls.ttl", []_C_int{4, 33, 2}},
{"net.pflow.stats", []_C_int{4, 34, 1}},
{"net.pipex.enable", []_C_int{4, 35, 1}},
{"vm.anonmin", []_C_int{2, 7}},
{"vm.loadavg", []_C_int{2, 2}},
{"vm.maxslp", []_C_int{2, 10}},
{"vm.nkmempages", []_C_int{2, 6}},
{"vm.psstrings", []_C_int{2, 3}},
{"vm.swapencrypt.enable", []_C_int{2, 5, 0}},
{"vm.swapencrypt.keyscreated", []_C_int{2, 5, 1}},
{"vm.swapencrypt.keysdeleted", []_C_int{2, 5, 2}},
{"vm.uspace", []_C_int{2, 11}},
{"vm.uvmexp", []_C_int{2, 4}},
{"vm.vmmeter", []_C_int{2, 1}},
{"vm.vnodemin", []_C_int{2, 9}},
{"vm.vtextmin", []_C_int{2, 8}},
}
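The added vm.* entries extend the static name-to-MIB table that the string-based sysctl helpers consult on OpenBSD, so these names resolve without a runtime lookup. A hedged, OpenBSD-only sketch (illustrative; SysctlRaw is the generic helper in the same package):

package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	// "vm.loadavg" is one of the names added to the MIB table above.
	raw, err := unix.SysctlRaw("vm.loadavg")
	if err != nil {
		fmt.Println("sysctl vm.loadavg failed:", err)
		return
	}
	fmt.Printf("vm.loadavg returned %d raw bytes\n", len(raw))
}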


@@ -6,11 +6,11 @@
package unix
const (
-sizeofPtr = 0x4
-sizeofShort = 0x2
-sizeofInt = 0x4
-sizeofLong = 0x4
-sizeofLongLong = 0x8
+SizeofPtr = 0x4
+SizeofShort = 0x2
+SizeofInt = 0x4
+SizeofLong = 0x4
+SizeofLongLong = 0x8
PathMax = 0x3ff
)
@@ -268,9 +268,22 @@ type Termios struct {
Cc [16]uint8
}
-type Termio struct{}
+type Termio struct {
+Iflag uint16
+Oflag uint16
+Cflag uint16
+Lflag uint16
+Line uint8
+Cc [8]uint8
+_ [1]byte
+}
-type Winsize struct{}
+type Winsize struct {
+Row uint16
+Col uint16
+Xpixel uint16
+Ypixel uint16
+}
type PollFd struct {
Fd int32
@@ -301,6 +314,32 @@ type Flock_t struct {
Len int64
}
-type Statfs_t struct{}
+type Fsid_t struct {
+Val [2]uint32
+}
+type Fsid64_t struct {
+Val [2]uint64
+}
+type Statfs_t struct {
+Version int32
+Type int32
+Bsize uint32
+Blocks uint32
+Bfree uint32
+Bavail uint32
+Files uint32
+Ffree uint32
+Fsid Fsid_t
+Vfstype int32
+Fsize uint32
+Vfsnumber int32
+Vfsoff int32
+Vfslen int32
+Vfsvers int32
+Fname [32]uint8
+Fpack [32]uint8
+Name_max int32
+}
const RNDGETENTCNT = 0x80045200


@@ -6,11 +6,11 @@
package unix
const (
-sizeofPtr = 0x8
-sizeofShort = 0x2
-sizeofInt = 0x4
-sizeofLong = 0x8
-sizeofLongLong = 0x8
+SizeofPtr = 0x8
+SizeofShort = 0x2
+SizeofInt = 0x4
+SizeofLong = 0x8
+SizeofLongLong = 0x8
PathMax = 0x3ff
)
@@ -275,9 +275,22 @@ type Termios struct {
Cc [16]uint8
}
-type Termio struct{}
+type Termio struct {
+Iflag uint16
+Oflag uint16
+Cflag uint16
+Lflag uint16
+Line uint8
+Cc [8]uint8
+_ [1]byte
+}
-type Winsize struct{}
+type Winsize struct {
+Row uint16
+Col uint16
+Xpixel uint16
+Ypixel uint16
+}
type PollFd struct {
Fd int32
@@ -308,6 +321,34 @@ type Flock_t struct {
Len int64
}
-type Statfs_t struct{}
+type Fsid_t struct {
+Val [2]uint32
+}
+type Fsid64_t struct {
+Val [2]uint64
+}
+type Statfs_t struct {
+Version int32
+Type int32
+Bsize uint64
+Blocks uint64
+Bfree uint64
+Bavail uint64
+Files uint64
+Ffree uint64
+Fsid Fsid64_t
+Vfstype int32
+_ [4]byte
+Fsize uint64
+Vfsnumber int32
+Vfsoff int32
+Vfslen int32
+Vfsvers int32
+Fname [32]uint8
+Fpack [32]uint8
+Name_max int32
+_ [4]byte
+}
const RNDGETENTCNT = 0x80045200
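The AIX hunks above replace the formerly empty Termio, Winsize, and Statfs_t placeholders with real layouts (32- and 64-bit variants). A rough sketch of reading the new Statfs_t fields; note that the Statfs wrapper name and shape are an assumption here, mirroring the other ports, so verify it against the vendored AIX sources before relying on it:

package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	var st unix.Statfs_t
	// Assumption: an AIX Statfs(path, *Statfs_t) wrapper analogous to the
	// Linux/BSD ones; the field names below come from the hunks above.
	if err := unix.Statfs("/", &st); err != nil {
		fmt.Println("statfs failed:", err)
		return
	}
	fmt.Printf("vfs type %d, block size %d, %d of %d blocks free\n",
		st.Vfstype, st.Bsize, st.Bfree, st.Blocks)
}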


@@ -6,11 +6,11 @@
package unix
const (
-sizeofPtr = 0x4
-sizeofShort = 0x2
-sizeofInt = 0x4
-sizeofLong = 0x4
-sizeofLongLong = 0x8
+SizeofPtr = 0x4
+SizeofShort = 0x2
+SizeofInt = 0x4
+SizeofLong = 0x4
+SizeofLongLong = 0x8
)
type (

Some files were not shown because too many files have changed in this diff