| query_docstring (stringlengths 24-20.8k) | positive_code (stringlengths 17-325k) | hard_negative_code (stringlengths 17-325k) | similarity_score (float64 0.3-1) | query_repo (407 stringclasses) | query_path (stringlengths 5-170) | hn_repo (400 stringclasses) | hn_path (stringlengths 5-170) | hn_license (4 stringclasses) | language (1 stringclass) |
|---|---|---|---|---|---|---|---|---|---|
CreateSignalMapRequest generates a "aws/request.Request" representing the
client's request for the CreateSignalMap operation. The "output" return
value will be populated with the request's response once the request completes
successfully.
Use "Send" method on the returned Request to send the API call to the service.
the "output" return value is not valid until after Send returns without error.
See CreateSignalMap for more information on using the CreateSignalMap
API call, and error handling.
This method is useful when you want to inject custom logic or configuration
into the SDK's request lifecycle. Such as custom headers, or retry logic.
// Example sending a request using the CreateSignalMapRequest method.
req, resp := client.CreateSignalMapRequest(params)
err := req.Send()
if err == nil { // resp is now filled
fmt.Println(resp)
}
See also, https://docs.aws.amazon.com/goto/WebAPI/medialive-2017-10-14/CreateSignalMap | func (c *MediaLive) CreateSignalMapRequest(input *CreateSignalMapInput) (req *request.Request, output *CreateSignalMapOutput) {
op := &request.Operation{
Name: opCreateSignalMap,
HTTPMethod: "POST",
HTTPPath: "/prod/signal-maps",
}
if input == nil {
input = &CreateSignalMapInput{}
}
output = &CreateSignalMapOutput{}
req = c.newRequest(op, input, output)
return
} | func (c *MediaLive) GetSignalMapRequest(input *GetSignalMapInput) (req *request.Request, output *GetSignalMapOutput) {
op := &request.Operation{
Name: opGetSignalMap,
HTTPMethod: "GET",
HTTPPath: "/prod/signal-maps/{identifier}",
}
if input == nil {
input = &GetSignalMapInput{}
}
output = &GetSignalMapOutput{}
req = c.newRequest(op, input, output)
return
} | 0.901887 | aws/aws-sdk-go | service/medialive/api.go | aws/aws-sdk-go | service/medialive/api.go | Apache-2.0 | go |
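The row above pairs the CreateSignalMapRequest docstring with request-constructor code from the AWS SDK for Go. The sketch below is a minimal, hedged illustration of the pattern the docstring describes (build the request first, inject custom configuration, then call Send); the session setup, the empty input, and the X-Example-Trace-Id header are assumptions for illustration, not part of the dataset row.

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/medialive"
)

func main() {
	// Assumed setup: default shared-config session and a MediaLive client.
	sess := session.Must(session.NewSession())
	client := medialive.New(sess)

	// Build the request without sending it; resp is not valid yet.
	req, resp := client.CreateSignalMapRequest(&medialive.CreateSignalMapInput{
		// Required fields omitted; they depend on the target account.
	})

	// Inject custom configuration before the call goes out, for example an
	// extra HTTP header on the underlying request (header name is hypothetical).
	req.HTTPRequest.Header.Set("X-Example-Trace-Id", "demo-123")

	// Send performs the API call; resp is only safe to read when err is nil.
	if err := req.Send(); err == nil {
		fmt.Println(resp)
	}
}
```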
CreateSignalMapRequest generates a "aws/request.Request" representing the
client's request for the CreateSignalMap operation. The "output" return
value will be populated with the request's response once the request completes
successfully.
Use "Send" method on the returned Request to send the API call to the service.
the "output" return value is not valid until after Send returns without error.
See CreateSignalMap for more information on using the CreateSignalMap
API call, and error handling.
This method is useful when you want to inject custom logic or configuration
into the SDK's request lifecycle. Such as custom headers, or retry logic.
// Example sending a request using the CreateSignalMapRequest method.
req, resp := client.CreateSignalMapRequest(params)
err := req.Send()
if err == nil { // resp is now filled
fmt.Println(resp)
}
See also, https://docs.aws.amazon.com/goto/WebAPI/medialive-2017-10-14/CreateSignalMap | func (c *MediaLive) CreateSignalMapRequest(input *CreateSignalMapInput) (req *request.Request, output *CreateSignalMapOutput) {
op := &request.Operation{
Name: opCreateSignalMap,
HTTPMethod: "POST",
HTTPPath: "/prod/signal-maps",
}
if input == nil {
input = &CreateSignalMapInput{}
}
output = &CreateSignalMapOutput{}
req = c.newRequest(op, input, output)
return
} | func (c *MediaLive) DeleteSignalMapRequest(input *DeleteSignalMapInput) (req *request.Request, output *DeleteSignalMapOutput) {
op := &request.Operation{
Name: opDeleteSignalMap,
HTTPMethod: "DELETE",
HTTPPath: "/prod/signal-maps/{identifier}",
}
if input == nil {
input = &DeleteSignalMapInput{}
}
output = &DeleteSignalMapOutput{}
req = c.newRequest(op, input, output)
req.Handlers.Unmarshal.Swap(restjson.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler)
return
} | 0.880021 | aws/aws-sdk-go | service/medialive/api.go | aws/aws-sdk-go | service/medialive/api.go | Apache-2.0 | go |
CreateSignalMapRequest generates a "aws/request.Request" representing the
client's request for the CreateSignalMap operation. The "output" return
value will be populated with the request's response once the request completes
successfully.
Use "Send" method on the returned Request to send the API call to the service.
the "output" return value is not valid until after Send returns without error.
See CreateSignalMap for more information on using the CreateSignalMap
API call, and error handling.
This method is useful when you want to inject custom logic or configuration
into the SDK's request lifecycle. Such as custom headers, or retry logic.
// Example sending a request using the CreateSignalMapRequest method.
req, resp := client.CreateSignalMapRequest(params)
err := req.Send()
if err == nil { // resp is now filled
fmt.Println(resp)
}
See also, https://docs.aws.amazon.com/goto/WebAPI/medialive-2017-10-14/CreateSignalMap | func (c *MediaLive) CreateSignalMapRequest(input *CreateSignalMapInput) (req *request.Request, output *CreateSignalMapOutput) {
op := &request.Operation{
Name: opCreateSignalMap,
HTTPMethod: "POST",
HTTPPath: "/prod/signal-maps",
}
if input == nil {
input = &CreateSignalMapInput{}
}
output = &CreateSignalMapOutput{}
req = c.newRequest(op, input, output)
return
} | func (c *MediaLive) StartUpdateSignalMapRequest(input *StartUpdateSignalMapInput) (req *request.Request, output *StartUpdateSignalMapOutput) {
op := &request.Operation{
Name: opStartUpdateSignalMap,
HTTPMethod: "PATCH",
HTTPPath: "/prod/signal-maps/{identifier}",
}
if input == nil {
input = &StartUpdateSignalMapInput{}
}
output = &StartUpdateSignalMapOutput{}
req = c.newRequest(op, input, output)
return
} | 0.855243 | aws/aws-sdk-go | service/medialive/api.go | aws/aws-sdk-go | service/medialive/api.go | Apache-2.0 | go |
CreateSignalMapRequest generates a "aws/request.Request" representing the
client's request for the CreateSignalMap operation. The "output" return
value will be populated with the request's response once the request completes
successfully.
Use "Send" method on the returned Request to send the API call to the service.
the "output" return value is not valid until after Send returns without error.
See CreateSignalMap for more information on using the CreateSignalMap
API call, and error handling.
This method is useful when you want to inject custom logic or configuration
into the SDK's request lifecycle. Such as custom headers, or retry logic.
// Example sending a request using the CreateSignalMapRequest method.
req, resp := client.CreateSignalMapRequest(params)
err := req.Send()
if err == nil { // resp is now filled
fmt.Println(resp)
}
See also, https://docs.aws.amazon.com/goto/WebAPI/medialive-2017-10-14/CreateSignalMap | func (c *MediaLive) CreateSignalMapRequest(input *CreateSignalMapInput) (req *request.Request, output *CreateSignalMapOutput) {
op := &request.Operation{
Name: opCreateSignalMap,
HTTPMethod: "POST",
HTTPPath: "/prod/signal-maps",
}
if input == nil {
input = &CreateSignalMapInput{}
}
output = &CreateSignalMapOutput{}
req = c.newRequest(op, input, output)
return
} | func (c *MediaLive) CreateSignalMapWithContext(ctx aws.Context, input *CreateSignalMapInput, opts ...request.Option) (*CreateSignalMapOutput, error) {
req, out := c.CreateSignalMapRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
} | 0.816864 | aws/aws-sdk-go | service/medialive/api.go | aws/aws-sdk-go | service/medialive/api.go | Apache-2.0 | go |
CreateSignalMapRequest generates a "aws/request.Request" representing the
client's request for the CreateSignalMap operation. The "output" return
value will be populated with the request's response once the request completes
successfully.
Use "Send" method on the returned Request to send the API call to the service.
the "output" return value is not valid until after Send returns without error.
See CreateSignalMap for more information on using the CreateSignalMap
API call, and error handling.
This method is useful when you want to inject custom logic or configuration
into the SDK's request lifecycle. Such as custom headers, or retry logic.
// Example sending a request using the CreateSignalMapRequest method.
req, resp := client.CreateSignalMapRequest(params)
err := req.Send()
if err == nil { // resp is now filled
fmt.Println(resp)
}
See also, https://docs.aws.amazon.com/goto/WebAPI/medialive-2017-10-14/CreateSignalMap | func (c *MediaLive) CreateSignalMapRequest(input *CreateSignalMapInput) (req *request.Request, output *CreateSignalMapOutput) {
op := &request.Operation{
Name: opCreateSignalMap,
HTTPMethod: "POST",
HTTPPath: "/prod/signal-maps",
}
if input == nil {
input = &CreateSignalMapInput{}
}
output = &CreateSignalMapOutput{}
req = c.newRequest(op, input, output)
return
} | func (c *MediaLive) ListSignalMapsRequest(input *ListSignalMapsInput) (req *request.Request, output *ListSignalMapsOutput) {
op := &request.Operation{
Name: opListSignalMaps,
HTTPMethod: "GET",
HTTPPath: "/prod/signal-maps",
Paginator: &request.Paginator{
InputTokens: []string{"NextToken"},
OutputTokens: []string{"NextToken"},
LimitToken: "MaxResults",
TruncationToken: "",
},
}
if input == nil {
input = &ListSignalMapsInput{}
}
output = &ListSignalMapsOutput{}
req = c.newRequest(op, input, output)
return
} | 0.810775 | aws/aws-sdk-go | service/medialive/api.go | aws/aws-sdk-go | service/medialive/api.go | Apache-2.0 | go |
The main parser program.
The first argument is a pointer to a structure obtained from
"sqlite3ParserAlloc" which describes the current state of the parser.
The second argument is the major token number. The third is
the minor token. The fourth optional argument is whatever the
user wants (and specified in the grammar) and is available for
use by the action routines.
Inputs:
- A pointer to the parser (an opaque structure).
- The major token number.
- The minor token number.
- An optional argument of a grammar-specified type.
Outputs:
None. | func Xsqlite3Parser(tls *libc.TLS, yyp uintptr, yymajor int32, yyminor Token) {
bp := tls.Alloc(16)
defer tls.Free(16)
var yyact uint16
var yypParser uintptr = yyp
var pParse uintptr = (*YyParser)(unsafe.Pointer(yypParser)).FpParse
yyact = (*YyStackEntry)(unsafe.Pointer((*YyParser)(unsafe.Pointer(yypParser)).Fyytos)).Fstateno
for 1 != 0 {
yyact = yy_find_shift_action(tls, uint16(yymajor), yyact)
if int32(yyact) >= YY_MIN_REDUCE {
var yyruleno uint32 = uint32(int32(yyact) - YY_MIN_REDUCE)
if int32(yyRuleInfoNRhs[yyruleno]) == 0 {
if (*YyParser)(unsafe.Pointer(yypParser)).Fyytos >= (*YyParser)(unsafe.Pointer(yypParser)).FyystackEnd {
yyStackOverflow(tls, yypParser)
break
}
}
yyact = yy_reduce(tls, yypParser, yyruleno, yymajor, yyminor, pParse)
} else if int32(yyact) <= YY_MAX_SHIFTREDUCE {
yy_shift(tls, yypParser, yyact, uint16(yymajor), yyminor)
break
} else if int32(yyact) == YY_ACCEPT_ACTION {
(*YyParser)(unsafe.Pointer(yypParser)).Fyytos -= 24
yy_accept(tls, yypParser)
return
} else {
*(*Token)(unsafe.Pointer(bp)) = yyminor
yy_syntax_error(tls, yypParser, yymajor, yyminor)
yy_destructor(tls, yypParser, uint16(yymajor), bp)
break
}
}
return
} | func _sqlite3Parser(tls *libc.TLS, yyp uintptr, yymajor int32, yyminor TToken) {
bp := tls.Alloc(16)
defer tls.Free(16)
var pParse, yypParser uintptr
var yyact uint16
var yyruleno uint32
var _ /* yyminorunion at bp+0 */ TYYMINORTYPE
_, _, _, _ = pParse, yyact, yypParser, yyruleno /* The parser action. */
yypParser = yyp /* The parser */
pParse = (*TyyParser)(unsafe.Pointer(yypParser)).FpParse
yyact = (*TyyStackEntry)(unsafe.Pointer((*TyyParser)(unsafe.Pointer(yypParser)).Fyytos)).Fstateno
for int32(1) != 0 { /* Exit by "break" */
yyact = _yy_find_shift_action(tls, libc.Uint16FromInt32(yymajor), yyact)
if libc.Int32FromUint16(yyact) >= int32(YY_MIN_REDUCE) {
yyruleno = libc.Uint32FromInt32(libc.Int32FromUint16(yyact) - int32(YY_MIN_REDUCE)) /* Reduce by this rule */
/* Check that the stack is large enough to grow by a single entry
** if the RHS of the rule is empty. This ensures that there is room
** enough on the stack to push the LHS value */
if int32(_yyRuleInfoNRhs[yyruleno]) == 0 {
if (*TyyParser)(unsafe.Pointer(yypParser)).Fyytos >= (*TyyParser)(unsafe.Pointer(yypParser)).FyystackEnd {
if _yyGrowStack(tls, yypParser) != 0 {
_yyStackOverflow(tls, yypParser)
break
}
}
}
yyact = _yy_reduce(tls, yypParser, yyruleno, yymajor, yyminor, pParse)
} else {
if libc.Int32FromUint16(yyact) <= int32(YY_MAX_SHIFTREDUCE) {
_yy_shift(tls, yypParser, yyact, libc.Uint16FromInt32(yymajor), yyminor)
break
} else {
if libc.Int32FromUint16(yyact) == int32(YY_ACCEPT_ACTION) {
(*TyyParser)(unsafe.Pointer(yypParser)).Fyytos -= 24
_yy_accept(tls, yypParser)
return
} else {
*(*TToken)(unsafe.Pointer(bp)) = yyminor
/* If the YYNOERRORRECOVERY macro is defined, then do not attempt to
** do any kind of error recovery. Instead, simply invoke the syntax
** error routine and continue going as if nothing had happened.
**
** Applications can set this macro (for example inside %include) if
** they intend to abandon the parse upon the first syntax error seen.
*/
_yy_syntax_error(tls, yypParser, yymajor, yyminor)
_yy_destructor(tls, yypParser, libc.Uint16FromInt32(yymajor), bp)
break
}
}
}
}
return
} | 0.740306 | 42wim/matterbridge | vendor/modernc.org/sqlite/lib/sqlite_openbsd_arm64.go | umputun/spot | vendor/modernc.org/sqlite/lib/sqlite_freebsd_amd64.go | MIT | go |
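The two parser drivers above come from machine-translated, cgo-free builds of SQLite, and the docstring describes the Lemon-generated parser entry point that consumes one token per call. Those functions are not exported for direct use, so the hedged sketch below only shows how that transpiled parser ends up being exercised in practice: through the public database/sql driver that the package registers. The driver name "sqlite" is the one modernc.org/sqlite registers; the table and statements are illustrative.

```go
package main

import (
	"database/sql"
	"fmt"
	"log"

	_ "modernc.org/sqlite" // pure-Go driver whose transpiled parser is shown above
)

func main() {
	db, err := sql.Open("sqlite", ":memory:")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Each statement below is tokenized and fed, token by token, into the
	// transpiled sqlite3Parser entry point while the statement is prepared.
	if _, err := db.Exec(`CREATE TABLE t(x INTEGER)`); err != nil {
		log.Fatal(err)
	}
	if _, err := db.Exec(`INSERT INTO t(x) VALUES (1), (2)`); err != nil {
		log.Fatal(err)
	}

	var n int
	if err := db.QueryRow(`SELECT COUNT(*) FROM t`).Scan(&n); err != nil {
		log.Fatal(err)
	}
	fmt.Println("rows:", n)
}
```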
The main parser program.
The first argument is a pointer to a structure obtained from
"sqlite3ParserAlloc" which describes the current state of the parser.
The second argument is the major token number. The third is
the minor token. The fourth optional argument is whatever the
user wants (and specified in the grammar) and is available for
use by the action routines.
Inputs:
- A pointer to the parser (an opaque structure).
- The major token number.
- The minor token number.
- An optional argument of a grammar-specified type.
Outputs:
None. | func Xsqlite3Parser(tls *libc.TLS, yyp uintptr, yymajor int32, yyminor Token) {
bp := tls.Alloc(16)
defer tls.Free(16)
var yyact uint16
var yypParser uintptr = yyp
var pParse uintptr = (*YyParser)(unsafe.Pointer(yypParser)).FpParse
yyact = (*YyStackEntry)(unsafe.Pointer((*YyParser)(unsafe.Pointer(yypParser)).Fyytos)).Fstateno
for 1 != 0 {
yyact = yy_find_shift_action(tls, uint16(yymajor), yyact)
if int32(yyact) >= YY_MIN_REDUCE {
var yyruleno uint32 = uint32(int32(yyact) - YY_MIN_REDUCE)
if int32(yyRuleInfoNRhs[yyruleno]) == 0 {
if (*YyParser)(unsafe.Pointer(yypParser)).Fyytos >= (*YyParser)(unsafe.Pointer(yypParser)).FyystackEnd {
yyStackOverflow(tls, yypParser)
break
}
}
yyact = yy_reduce(tls, yypParser, yyruleno, yymajor, yyminor, pParse)
} else if int32(yyact) <= YY_MAX_SHIFTREDUCE {
yy_shift(tls, yypParser, yyact, uint16(yymajor), yyminor)
break
} else if int32(yyact) == YY_ACCEPT_ACTION {
(*YyParser)(unsafe.Pointer(yypParser)).Fyytos -= 24
yy_accept(tls, yypParser)
return
} else {
*(*Token)(unsafe.Pointer(bp)) = yyminor
yy_syntax_error(tls, yypParser, yymajor, yyminor)
yy_destructor(tls, yypParser, uint16(yymajor), bp)
break
}
}
return
} | func _sqlite3Fts5Parser(tls *libc.TLS, fts5yyp uintptr, fts5yymajor int32, fts5yyminor TFts5Token, pParse uintptr) {
bp := tls.Alloc(16)
defer tls.Free(16)
var fts5yyact uint8
var fts5yypParser uintptr
var fts5yyruleno uint32
var _ /* fts5yyminorunion at bp+0 */ Tfts5YYMINORTYPE
_, _, _ = fts5yyact, fts5yypParser, fts5yyruleno /* The parser action. */
fts5yypParser = fts5yyp /* The parser */
(*Tfts5yyParser)(unsafe.Pointer(fts5yypParser)).FpParse = pParse
fts5yyact = (*Tfts5yyStackEntry)(unsafe.Pointer((*Tfts5yyParser)(unsafe.Pointer(fts5yypParser)).Ffts5yytos)).Fstateno
for int32(1) != 0 { /* Exit by "break" */
fts5yyact = _fts5yy_find_shift_action(tls, libc.Uint8FromInt32(fts5yymajor), fts5yyact)
if libc.Int32FromUint8(fts5yyact) >= int32(fts5YY_MIN_REDUCE) {
fts5yyruleno = libc.Uint32FromInt32(libc.Int32FromUint8(fts5yyact) - int32(fts5YY_MIN_REDUCE)) /* Reduce by this rule */
/* Check that the stack is large enough to grow by a single entry
** if the RHS of the rule is empty. This ensures that there is room
** enough on the stack to push the LHS value */
if int32(_fts5yyRuleInfoNRhs[fts5yyruleno]) == 0 {
if (*Tfts5yyParser)(unsafe.Pointer(fts5yypParser)).Ffts5yytos >= (*Tfts5yyParser)(unsafe.Pointer(fts5yypParser)).Ffts5yystackEnd {
if int32(1) != 0 {
_fts5yyStackOverflow(tls, fts5yypParser)
break
}
}
}
fts5yyact = _fts5yy_reduce(tls, fts5yypParser, fts5yyruleno, fts5yymajor, fts5yyminor)
} else {
if libc.Int32FromUint8(fts5yyact) <= int32(fts5YY_MAX_SHIFTREDUCE) {
_fts5yy_shift(tls, fts5yypParser, fts5yyact, libc.Uint8FromInt32(fts5yymajor), fts5yyminor)
break
} else {
if libc.Int32FromUint8(fts5yyact) == int32(fts5YY_ACCEPT_ACTION) {
(*Tfts5yyParser)(unsafe.Pointer(fts5yypParser)).Ffts5yytos -= 24
_fts5yy_accept(tls, fts5yypParser)
return
} else {
*(*TFts5Token)(unsafe.Pointer(bp)) = fts5yyminor
/* If the fts5YYNOERRORRECOVERY macro is defined, then do not attempt to
** do any kind of error recovery. Instead, simply invoke the syntax
** error routine and continue going as if nothing had happened.
**
** Applications can set this macro (for example inside %include) if
** they intend to abandon the parse upon the first syntax error seen.
*/
_fts5yy_syntax_error(tls, fts5yypParser, fts5yymajor, fts5yyminor)
_fts5yy_destructor(tls, fts5yypParser, libc.Uint8FromInt32(fts5yymajor), bp)
break
}
}
}
}
return
} | 0.699471 | 42wim/matterbridge | vendor/modernc.org/sqlite/lib/sqlite_openbsd_arm64.go | umputun/spot | vendor/modernc.org/sqlite/lib/sqlite_freebsd_amd64.go | MIT | go |
The main parser program.
The first argument is a pointer to a structure obtained from
"sqlite3ParserAlloc" which describes the current state of the parser.
The second argument is the major token number. The third is
the minor token. The fourth optional argument is whatever the
user wants (and specified in the grammar) and is available for
use by the action routines.
Inputs:
- A pointer to the parser (an opaque structure).
- The major token number.
- The minor token number.
- An optional argument of a grammar-specified type.
Outputs:
None. | func Xsqlite3Parser(tls *libc.TLS, yyp uintptr, yymajor int32, yyminor Token) {
bp := tls.Alloc(16)
defer tls.Free(16)
var yyact uint16
var yypParser uintptr = yyp
var pParse uintptr = (*YyParser)(unsafe.Pointer(yypParser)).FpParse
yyact = (*YyStackEntry)(unsafe.Pointer((*YyParser)(unsafe.Pointer(yypParser)).Fyytos)).Fstateno
for 1 != 0 {
yyact = yy_find_shift_action(tls, uint16(yymajor), yyact)
if int32(yyact) >= YY_MIN_REDUCE {
var yyruleno uint32 = uint32(int32(yyact) - YY_MIN_REDUCE)
if int32(yyRuleInfoNRhs[yyruleno]) == 0 {
if (*YyParser)(unsafe.Pointer(yypParser)).Fyytos >= (*YyParser)(unsafe.Pointer(yypParser)).FyystackEnd {
yyStackOverflow(tls, yypParser)
break
}
}
yyact = yy_reduce(tls, yypParser, yyruleno, yymajor, yyminor, pParse)
} else if int32(yyact) <= YY_MAX_SHIFTREDUCE {
yy_shift(tls, yypParser, yyact, uint16(yymajor), yyminor)
break
} else if int32(yyact) == YY_ACCEPT_ACTION {
(*YyParser)(unsafe.Pointer(yypParser)).Fyytos -= 24
yy_accept(tls, yypParser)
return
} else {
*(*Token)(unsafe.Pointer(bp)) = yyminor
yy_syntax_error(tls, yypParser, yymajor, yyminor)
yy_destructor(tls, yypParser, uint16(yymajor), bp)
break
}
}
return
} | func Xsqlite3InitCallback(tls *libc.TLS, pInit uintptr, argc int32, argv uintptr, NotUsed uintptr) int32 {
bp := tls.Alloc(8)
defer tls.Free(8)
var pData uintptr = pInit
var db uintptr = (*InitData)(unsafe.Pointer(pData)).Fdb
var iDb int32 = (*InitData)(unsafe.Pointer(pData)).FiDb
_ = NotUsed
_ = argc
*(*U32)(unsafe.Pointer(db + 44)) |= U32(DBFLAG_EncodingFixed)
if argv == uintptr(0) {
return 0
}
(*InitData)(unsafe.Pointer(pData)).FnInitRow++
if (*Sqlite3)(unsafe.Pointer(db)).FmallocFailed != 0 {
corruptSchema(tls, pData, argv, uintptr(0))
return 1
}
if *(*uintptr)(unsafe.Pointer(argv + 3*8)) == uintptr(0) {
corruptSchema(tls, pData, argv, uintptr(0))
} else if *(*uintptr)(unsafe.Pointer(argv + 4*8)) != 0 &&
'c' == int32(Xsqlite3UpperToLower[uint8(*(*int8)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(argv + 4*8)))))]) &&
'r' == int32(Xsqlite3UpperToLower[uint8(*(*int8)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(argv + 4*8)) + 1)))]) {
var rc int32
var saved_iDb U8 = (*Sqlite3)(unsafe.Pointer(db)).Finit.FiDb
(*Sqlite3)(unsafe.Pointer(db)).Finit.FiDb = U8(iDb)
if Xsqlite3GetUInt32(tls, *(*uintptr)(unsafe.Pointer(argv + 3*8)), db+192) == 0 ||
(*Sqlite3)(unsafe.Pointer(db)).Finit.FnewTnum > (*InitData)(unsafe.Pointer(pData)).FmxPage && (*InitData)(unsafe.Pointer(pData)).FmxPage > Pgno(0) {
if Xsqlite3Config.FbExtraSchemaChecks != 0 {
corruptSchema(tls, pData, argv, ts+14143)
}
}
libc.SetBitFieldPtr8Uint32(db+192+8, uint32(0), 0, 0x1)
(*Sqlite3)(unsafe.Pointer(db)).Finit.FazInit = argv
*(*uintptr)(unsafe.Pointer(bp)) = uintptr(0)
sqlite3Prepare(tls, db, *(*uintptr)(unsafe.Pointer(argv + 4*8)), -1, uint32(0), uintptr(0), bp, uintptr(0))
rc = (*Sqlite3)(unsafe.Pointer(db)).FerrCode
(*Sqlite3)(unsafe.Pointer(db)).Finit.FiDb = saved_iDb
if SQLITE_OK != rc {
if uint32(int32(*(*uint8)(unsafe.Pointer(db + 192 + 8))&0x1>>0)) != 0 {
} else {
if rc > (*InitData)(unsafe.Pointer(pData)).Frc {
(*InitData)(unsafe.Pointer(pData)).Frc = rc
}
if rc == SQLITE_NOMEM {
Xsqlite3OomFault(tls, db)
} else if rc != SQLITE_INTERRUPT && rc&0xFF != SQLITE_LOCKED {
corruptSchema(tls, pData, argv, Xsqlite3_errmsg(tls, db))
}
}
}
(*Sqlite3)(unsafe.Pointer(db)).Finit.FazInit = uintptr(unsafe.Pointer(&Xsqlite3StdType))
Xsqlite3_finalize(tls, *(*uintptr)(unsafe.Pointer(bp)))
} else if *(*uintptr)(unsafe.Pointer(argv + 1*8)) == uintptr(0) || *(*uintptr)(unsafe.Pointer(argv + 4*8)) != uintptr(0) && int32(*(*int8)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(argv + 4*8))))) != 0 {
corruptSchema(tls, pData, argv, uintptr(0))
} else {
var pIndex uintptr
pIndex = Xsqlite3FindIndex(tls, db, *(*uintptr)(unsafe.Pointer(argv + 1*8)), (*Db)(unsafe.Pointer((*Sqlite3)(unsafe.Pointer(db)).FaDb+uintptr(iDb)*32)).FzDbSName)
if pIndex == uintptr(0) {
corruptSchema(tls, pData, argv, ts+18342)
} else if Xsqlite3GetUInt32(tls, *(*uintptr)(unsafe.Pointer(argv + 3*8)), pIndex+88) == 0 ||
(*Index)(unsafe.Pointer(pIndex)).Ftnum < Pgno(2) ||
(*Index)(unsafe.Pointer(pIndex)).Ftnum > (*InitData)(unsafe.Pointer(pData)).FmxPage ||
Xsqlite3IndexHasDuplicateRootPage(tls, pIndex) != 0 {
if Xsqlite3Config.FbExtraSchemaChecks != 0 {
corruptSchema(tls, pData, argv, ts+14143)
}
}
}
return 0
} | 0.602772 | 42wim/matterbridge | vendor/modernc.org/sqlite/lib/sqlite_openbsd_arm64.go | 42wim/matterbridge | vendor/modernc.org/sqlite/lib/sqlite_openbsd_arm64.go | Apache-2.0 | go |
The main parser program.
The first argument is a pointer to a structure obtained from
"sqlite3ParserAlloc" which describes the current state of the parser.
The second argument is the major token number. The third is
the minor token. The fourth optional argument is whatever the
user wants (and specified in the grammar) and is available for
use by the action routines.
Inputs:
- A pointer to the parser (an opaque structure).
- The major token number.
- The minor token number.
- An optional argument of a grammar-specified type.
Outputs:
None. | func Xsqlite3Parser(tls *libc.TLS, yyp uintptr, yymajor int32, yyminor Token) {
bp := tls.Alloc(16)
defer tls.Free(16)
var yyact uint16
var yypParser uintptr = yyp
var pParse uintptr = (*YyParser)(unsafe.Pointer(yypParser)).FpParse
yyact = (*YyStackEntry)(unsafe.Pointer((*YyParser)(unsafe.Pointer(yypParser)).Fyytos)).Fstateno
for 1 != 0 {
yyact = yy_find_shift_action(tls, uint16(yymajor), yyact)
if int32(yyact) >= YY_MIN_REDUCE {
var yyruleno uint32 = uint32(int32(yyact) - YY_MIN_REDUCE)
if int32(yyRuleInfoNRhs[yyruleno]) == 0 {
if (*YyParser)(unsafe.Pointer(yypParser)).Fyytos >= (*YyParser)(unsafe.Pointer(yypParser)).FyystackEnd {
yyStackOverflow(tls, yypParser)
break
}
}
yyact = yy_reduce(tls, yypParser, yyruleno, yymajor, yyminor, pParse)
} else if int32(yyact) <= YY_MAX_SHIFTREDUCE {
yy_shift(tls, yypParser, yyact, uint16(yymajor), yyminor)
break
} else if int32(yyact) == YY_ACCEPT_ACTION {
(*YyParser)(unsafe.Pointer(yypParser)).Fyytos -= 24
yy_accept(tls, yypParser)
return
} else {
*(*Token)(unsafe.Pointer(bp)) = yyminor
yy_syntax_error(tls, yypParser, yymajor, yyminor)
yy_destructor(tls, yypParser, uint16(yymajor), bp)
break
}
}
return
} | func Xsqlite3FinishCoding(tls *libc.TLS, pParse uintptr) {
var db uintptr
var v uintptr
var iDb int32
var i int32
db = (*Parse)(unsafe.Pointer(pParse)).Fdb
if (*Parse)(unsafe.Pointer(pParse)).Fnested != 0 {
return
}
if (*Parse)(unsafe.Pointer(pParse)).FnErr != 0 {
if (*Sqlite3)(unsafe.Pointer(db)).FmallocFailed != 0 {
(*Parse)(unsafe.Pointer(pParse)).Frc = SQLITE_NOMEM
}
return
}
v = (*Parse)(unsafe.Pointer(pParse)).FpVdbe
if v == uintptr(0) {
if (*Sqlite3)(unsafe.Pointer(db)).Finit.Fbusy != 0 {
(*Parse)(unsafe.Pointer(pParse)).Frc = SQLITE_DONE
return
}
v = Xsqlite3GetVdbe(tls, pParse)
if v == uintptr(0) {
(*Parse)(unsafe.Pointer(pParse)).Frc = SQLITE_ERROR
}
}
if v != 0 {
if (*Parse)(unsafe.Pointer(pParse)).FbReturning != 0 {
var pReturning uintptr = *(*uintptr)(unsafe.Pointer(pParse + 200))
var addrRewind int32
var reg int32
if (*Returning)(unsafe.Pointer(pReturning)).FnRetCol != 0 {
Xsqlite3VdbeAddOp0(tls, v, OP_FkCheck)
addrRewind = Xsqlite3VdbeAddOp1(tls, v, OP_Rewind, (*Returning)(unsafe.Pointer(pReturning)).FiRetCur)
reg = (*Returning)(unsafe.Pointer(pReturning)).FiRetReg
for i = 0; i < (*Returning)(unsafe.Pointer(pReturning)).FnRetCol; i++ {
Xsqlite3VdbeAddOp3(tls, v, OP_Column, (*Returning)(unsafe.Pointer(pReturning)).FiRetCur, i, reg+i)
}
Xsqlite3VdbeAddOp2(tls, v, OP_ResultRow, reg, i)
Xsqlite3VdbeAddOp2(tls, v, OP_Next, (*Returning)(unsafe.Pointer(pReturning)).FiRetCur, addrRewind+1)
Xsqlite3VdbeJumpHere(tls, v, addrRewind)
}
}
Xsqlite3VdbeAddOp0(tls, v, OP_Halt)
Xsqlite3VdbeJumpHere(tls, v, 0)
iDb = 0
for __ccgo := true; __ccgo; __ccgo = libc.PreIncInt32(&iDb, 1) < (*Sqlite3)(unsafe.Pointer(db)).FnDb {
var pSchema uintptr
if libc.Bool32((*Parse)(unsafe.Pointer(pParse)).FcookieMask&(YDbMask(1)<<iDb) != YDbMask(0)) == 0 {
continue
}
Xsqlite3VdbeUsesBtree(tls, v, iDb)
pSchema = (*Db)(unsafe.Pointer((*Sqlite3)(unsafe.Pointer(db)).FaDb + uintptr(iDb)*32)).FpSchema
Xsqlite3VdbeAddOp4Int(tls, v,
OP_Transaction,
iDb,
libc.Bool32((*Parse)(unsafe.Pointer(pParse)).FwriteMask&(YDbMask(1)<<iDb) != YDbMask(0)),
(*Schema)(unsafe.Pointer(pSchema)).Fschema_cookie,
(*Schema)(unsafe.Pointer(pSchema)).FiGeneration)
if int32((*Sqlite3)(unsafe.Pointer(db)).Finit.Fbusy) == 0 {
Xsqlite3VdbeChangeP5(tls, v, uint16(1))
}
}
for i = 0; i < (*Parse)(unsafe.Pointer(pParse)).FnVtabLock; i++ {
var vtab uintptr = Xsqlite3GetVTable(tls, db, *(*uintptr)(unsafe.Pointer((*Parse)(unsafe.Pointer(pParse)).FapVtabLock + uintptr(i)*8)))
Xsqlite3VdbeAddOp4(tls, v, OP_VBegin, 0, 0, 0, vtab, -11)
}
(*Parse)(unsafe.Pointer(pParse)).FnVtabLock = 0
codeTableLocks(tls, pParse)
Xsqlite3AutoincrementBegin(tls, pParse)
if (*Parse)(unsafe.Pointer(pParse)).FpConstExpr != 0 {
var pEL uintptr = (*Parse)(unsafe.Pointer(pParse)).FpConstExpr
(*Parse)(unsafe.Pointer(pParse)).FokConstFactor = U8(0)
for i = 0; i < (*ExprList)(unsafe.Pointer(pEL)).FnExpr; i++ {
var iReg int32 = *(*int32)(unsafe.Pointer(pEL + 8 + uintptr(i)*32 + 24))
Xsqlite3ExprCode(tls, pParse, (*ExprList_item)(unsafe.Pointer(pEL+8+uintptr(i)*32)).FpExpr, iReg)
}
}
if (*Parse)(unsafe.Pointer(pParse)).FbReturning != 0 {
var pRet uintptr = *(*uintptr)(unsafe.Pointer(pParse + 200))
if (*Returning)(unsafe.Pointer(pRet)).FnRetCol != 0 {
Xsqlite3VdbeAddOp2(tls, v, OP_OpenEphemeral, (*Returning)(unsafe.Pointer(pRet)).FiRetCur, (*Returning)(unsafe.Pointer(pRet)).FnRetCol)
}
}
Xsqlite3VdbeGoto(tls, v, 1)
}
if (*Parse)(unsafe.Pointer(pParse)).FnErr == 0 {
Xsqlite3VdbeMakeReady(tls, v, pParse)
(*Parse)(unsafe.Pointer(pParse)).Frc = SQLITE_DONE
} else {
(*Parse)(unsafe.Pointer(pParse)).Frc = SQLITE_ERROR
}
} | 0.586793 | 42wim/matterbridge | vendor/modernc.org/sqlite/lib/sqlite_openbsd_arm64.go | 42wim/matterbridge | vendor/modernc.org/sqlite/lib/sqlite_openbsd_arm64.go | Apache-2.0 | go |
The main parser program.
The first argument is a pointer to a structure obtained from
"sqlite3ParserAlloc" which describes the current state of the parser.
The second argument is the major token number. The third is
the minor token. The fourth optional argument is whatever the
user wants (and specified in the grammar) and is available for
use by the action routines.
Inputs:
- A pointer to the parser (an opaque structure).
- The major token number.
- The minor token number.
- An optional argument of a grammar-specified type.
Outputs:
None. | func Xsqlite3Parser(tls *libc.TLS, yyp uintptr, yymajor int32, yyminor Token) {
bp := tls.Alloc(16)
defer tls.Free(16)
var yyact uint16
var yypParser uintptr = yyp
var pParse uintptr = (*YyParser)(unsafe.Pointer(yypParser)).FpParse
yyact = (*YyStackEntry)(unsafe.Pointer((*YyParser)(unsafe.Pointer(yypParser)).Fyytos)).Fstateno
for 1 != 0 {
yyact = yy_find_shift_action(tls, uint16(yymajor), yyact)
if int32(yyact) >= YY_MIN_REDUCE {
var yyruleno uint32 = uint32(int32(yyact) - YY_MIN_REDUCE)
if int32(yyRuleInfoNRhs[yyruleno]) == 0 {
if (*YyParser)(unsafe.Pointer(yypParser)).Fyytos >= (*YyParser)(unsafe.Pointer(yypParser)).FyystackEnd {
yyStackOverflow(tls, yypParser)
break
}
}
yyact = yy_reduce(tls, yypParser, yyruleno, yymajor, yyminor, pParse)
} else if int32(yyact) <= YY_MAX_SHIFTREDUCE {
yy_shift(tls, yypParser, yyact, uint16(yymajor), yyminor)
break
} else if int32(yyact) == YY_ACCEPT_ACTION {
(*YyParser)(unsafe.Pointer(yypParser)).Fyytos -= 24
yy_accept(tls, yypParser)
return
} else {
*(*Token)(unsafe.Pointer(bp)) = yyminor
yy_syntax_error(tls, yypParser, yymajor, yyminor)
yy_destructor(tls, yypParser, uint16(yymajor), bp)
break
}
}
return
} | func (c *conn) step(pstmt uintptr) (int, error) {
for {
switch rc := sqlite3.Xsqlite3_step(c.tls, pstmt); rc {
case sqliteLockedSharedcache:
if err := c.retry(pstmt); err != nil {
return sqlite3.SQLITE_LOCKED, err
}
case
sqlite3.SQLITE_DONE,
sqlite3.SQLITE_ROW:
return int(rc), nil
default:
return int(rc), c.errstr(rc)
}
}
} | 0.532852 | 42wim/matterbridge | vendor/modernc.org/sqlite/lib/sqlite_openbsd_arm64.go | 42wim/matterbridge | vendor/modernc.org/sqlite/sqlite.go | Apache-2.0 | go |
ParanoidUnmarshalJSONObjectExactFields unmarshals data as a JSON object, failing on the slightest unexpected aspect
(including duplicated keys, unrecognized keys, and non-matching types). Each of the fields in exactFields
must be present exactly once, and no other fields are accepted.
seenKeys := set.New[string]()
if err := ParanoidUnmarshalJSONObject(data, func(key string) any {
if valuePtr, ok := exactFields[key]; ok {
seenKeys.Add(key)
return valuePtr
}
return nil
}); err != nil {
return err
}
for key := range exactFields {
if !seenKeys.Contains(key) {
return JSONFormatError(fmt.Sprintf(`Key %q missing in a JSON object`, key))
}
}
return nil
} | func ParanoidUnmarshalJSONObject(data []byte, fieldResolver func(string) any) error {
seenKeys := set.New[string]()
dec := json.NewDecoder(bytes.NewReader(data))
t, err := dec.Token()
if err != nil {
return JSONFormatError(err.Error())
}
if t != json.Delim('{') {
return JSONFormatError(fmt.Sprintf("JSON object expected, got %#v", t))
}
for {
t, err := dec.Token()
if err != nil {
return JSONFormatError(err.Error())
}
if t == json.Delim('}') {
break
}
key, ok := t.(string)
if !ok {
// Coverage: This should never happen, dec.Token() rejects non-string-literals in this state.
return JSONFormatError(fmt.Sprintf("Key string literal expected, got %#v", t))
}
if seenKeys.Contains(key) {
return JSONFormatError(fmt.Sprintf("Duplicate key %q", key))
}
seenKeys.Add(key)
valuePtr := fieldResolver(key)
if valuePtr == nil {
return JSONFormatError(fmt.Sprintf("Unknown key %q", key))
}
// This works like json.Unmarshal, in particular it allows us to implement UnmarshalJSON to implement strict parsing of the field value.
if err := dec.Decode(valuePtr); err != nil {
return JSONFormatError(err.Error())
}
}
if _, err := dec.Token(); err != io.EOF {
return JSONFormatError("Unexpected data after JSON object")
}
return nil
} | 0.676414 | containers/podman-tui | vendor/github.com/containers/image/v5/signature/internal/json.go | containers/podman-tui | vendor/github.com/containers/image/v5/signature/internal/json.go | Apache-2.0 | go |
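ParanoidUnmarshalJSONObjectExactFields and its helper above live in an internal package of containers/image, so they cannot be imported from outside that module. The sketch below is therefore written as if it sat in the same package, as a testable Example function; the JSON document shape and its field names are made up for illustration.

```go
package internal

import "fmt"

// ExampleParanoidUnmarshalJSONObjectExactFields is a hedged usage sketch;
// the JSON document and its keys are invented for illustration.
func ExampleParanoidUnmarshalJSONObjectExactFields() {
	data := []byte(`{"type":"example-signature","version":"1.0"}`)

	var docType, version string
	err := ParanoidUnmarshalJSONObjectExactFields(data, map[string]any{
		"type":    &docType,
		"version": &version,
	})
	fmt.Println(docType, version, err)
	// A duplicate key, an unknown key, a missing "type"/"version", or a
	// non-string value would instead produce a JSONFormatError.
	// Output: example-signature 1.0 <nil>
}
```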
ParanoidUnmarshalJSONObjectExactFields unmarshals data as a JSON object, failing on the slightest unexpected aspect
(including duplicated keys, unrecognized keys, and non-matching types). Each of the fields in exactFields
must be present exactly once, and no other fields are accepted.
seenKeys := set.New[string]()
if err := ParanoidUnmarshalJSONObject(data, func(key string) any {
if valuePtr, ok := exactFields[key]; ok {
seenKeys.Add(key)
return valuePtr
}
return nil
}); err != nil {
return err
}
for key := range exactFields {
if !seenKeys.Contains(key) {
return JSONFormatError(fmt.Sprintf(`Key %q missing in a JSON object`, key))
}
}
return nil
} | func Unmarshal(data []byte, v interface{}) error {
return kjson.UnmarshalCaseSensitivePreserveInts(data, v)
} | 0.636854 | containers/podman-tui | vendor/github.com/containers/image/v5/signature/internal/json.go | k8snetworkplumbingwg/multus-cni | vendor/k8s.io/apimachinery/pkg/util/json/json.go | Apache-2.0 | go |
ParanoidUnmarshalJSONObjectExactFields unmarshals data as a JSON object, failing on the slightest unexpected aspect
(including duplicated keys, unrecognized keys, and non-matching types). Each of the fields in exactFields
must be present exactly once, and no other fields are accepted.
seenKeys := set.New[string]()
if err := ParanoidUnmarshalJSONObject(data, func(key string) any {
if valuePtr, ok := exactFields[key]; ok {
seenKeys.Add(key)
return valuePtr
}
return nil
}); err != nil {
return err
}
for key := range exactFields {
if !seenKeys.Contains(key) {
return JSONFormatError(fmt.Sprintf(`Key %q missing in a JSON object`, key))
}
}
return nil
} | func (t *cachedToken) UnmarshalJSON(b []byte) error {
var fields map[string]interface{}
if err := json.Unmarshal(b, &fields); err != nil {
return nil
}
t.UnknownFields = map[string]interface{}{}
for k, v := range fields {
var err error
switch k {
case "accessToken":
err = getTokenFieldString(v, &t.AccessToken)
case "expiresAt":
err = getTokenFieldRFC3339(v, &t.ExpiresAt)
case "refreshToken":
err = getTokenFieldString(v, &t.RefreshToken)
case "clientId":
err = getTokenFieldString(v, &t.ClientID)
case "clientSecret":
err = getTokenFieldString(v, &t.ClientSecret)
default:
t.UnknownFields[k] = v
}
if err != nil {
return fmt.Errorf("field %q, %v", k, err)
}
}
return nil
} | 0.632144 | containers/podman-tui | vendor/github.com/containers/image/v5/signature/internal/json.go | tektoncd/cli | vendor/github.com/aws/aws-sdk-go/aws/credentials/ssocreds/sso_cached_token.go | Apache-2.0 | go |
ParanoidUnmarshalJSONObjectExactFields unmarshals data as a JSON object, failing on the slightest unexpected aspect
(including duplicated keys, unrecognized keys, and non-matching types). Each of the fields in exactFields
must be present exactly once, and no other fields are accepted.
seenKeys := set.New[string]()
if err := ParanoidUnmarshalJSONObject(data, func(key string) any {
if valuePtr, ok := exactFields[key]; ok {
seenKeys.Add(key)
return valuePtr
}
return nil
}); err != nil {
return err
}
for key := range exactFields {
if !seenKeys.Contains(key) {
return JSONFormatError(fmt.Sprintf(`Key %q missing in a JSON object`, key))
}
}
return nil
} | func UnmarshalStrict(in []byte, out interface{}) (err error) {
return unmarshal(in, out, true)
} | 0.623639 | containers/podman-tui | vendor/github.com/containers/image/v5/signature/internal/json.go | DavidDeSimone/OpenCloudSaves | vendor/github.com/go-yaml/yaml/yaml.go | MIT | go |
ParanoidUnmarshalJSONObjectExactFields unmarshals data as a JSON object, failing on the slightest unexpected aspect
(including duplicated keys, unrecognized keys, and non-matching types). Each of the fields in exactFields
must be present exactly once, and no other fields are accepted.
seenKeys := set.New[string]()
if err := ParanoidUnmarshalJSONObject(data, func(key string) any {
if valuePtr, ok := exactFields[key]; ok {
seenKeys.Add(key)
return valuePtr
}
return nil
}); err != nil {
return err
}
for key := range exactFields {
if !seenKeys.Contains(key) {
return JSONFormatError(fmt.Sprintf(`Key %q missing in a JSON object`, key))
}
}
return nil
} | func Unmarshal(b []byte, i interface{}) error {
if len(b) == 0 {
return nil
}
jdec := json.NewDecoder(bytes.NewBuffer(b))
jdec.UseNumber()
return unmarshalStruct(jdec, i)
} | 0.590373 | containers/podman-tui | vendor/github.com/containers/image/v5/signature/internal/json.go | tektoncd/cli | vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json/json.go | Apache-2.0 | go |
TreesgetAncestors returns a list of all ancestors of this node. The first node of the list is the root
and the last node is the parent of this node.
goland:noinspection GoUnusedExportedFunction | func TreesgetAncestors(t Tree) []Tree {
ancestors := make([]Tree, 0)
t = t.GetParent()
for t != nil {
f := []Tree{t}
ancestors = append(f, ancestors...)
t = t.GetParent()
}
return ancestors
} | func TreesGetChildren(t Tree) []Tree {
list := make([]Tree, 0)
for i := 0; i < t.GetChildCount(); i++ {
list = append(list, t.GetChild(i))
}
return list
} | 0.760209 | google/cel-go | vendor/github.com/antlr4-go/antlr/v4/trees.go | google/cel-go | vendor/github.com/antlr4-go/antlr/v4/trees.go | Apache-2.0 | go |
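Both helpers above operate on ANTLR's generic Tree interface. The snippet below is a hedged sketch of how TreesgetAncestors is typically combined with the node itself to obtain a full root-to-node path; the ctx argument is assumed to be any node of a parse tree produced by an ANTLR-generated parser, which is not shown here.

```go
package treedemo

import "github.com/antlr4-go/antlr/v4"

// pathFromRoot returns the chain of nodes from the tree root down to ctx.
// TreesgetAncestors yields root..parent, so appending ctx completes the path.
func pathFromRoot(ctx antlr.Tree) []antlr.Tree {
	return append(antlr.TreesgetAncestors(ctx), ctx)
}
```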
TreesgetAncestors returns a list of all ancestors of this node. The first node of the list is the root
and the last node is the parent of this node.
goland:noinspection GoUnusedExportedFunction | func TreesgetAncestors(t Tree) []Tree {
ancestors := make([]Tree, 0)
t = t.GetParent()
for t != nil {
f := []Tree{t}
ancestors = append(f, ancestors...)
t = t.GetParent()
}
return ancestors
} | func (n *Node) Ancestors() iter.Seq[*Node] {
_ = n.Parent // eager nil check
return func(yield func(*Node) bool) {
for p := n.Parent; p != nil && yield(p); p = p.Parent {
}
}
} | 0.672151 | google/cel-go | vendor/github.com/antlr4-go/antlr/v4/trees.go | Mirantis/cri-dockerd | vendor/golang.org/x/net/html/iter.go | Apache-2.0 | go |
TreesgetAncestors returns a list of all ancestors of this node. The first node of the list is the root
and the last node is the parent of this node.
goland:noinspection GoUnusedExportedFunction | func TreesgetAncestors(t Tree) []Tree {
ancestors := make([]Tree, 0)
t = t.GetParent()
for t != nil {
f := []Tree{t}
ancestors = append(f, ancestors...)
t = t.GetParent()
}
return ancestors
} | func (n *Node) Descendants() iter.Seq[*Node] {
_ = n.FirstChild // eager nil check
return func(yield func(*Node) bool) {
n.descendants(yield)
}
} | 0.538569 | google/cel-go | vendor/github.com/antlr4-go/antlr/v4/trees.go | Mirantis/cri-dockerd | vendor/golang.org/x/net/html/iter.go | Apache-2.0 | go |
TreesgetAncestors returns a list of all ancestors of this node. The first node of the list is the root
and the last node is the parent of this node.
goland:noinspection GoUnusedExportedFunction | func TreesgetAncestors(t Tree) []Tree {
ancestors := make([]Tree, 0)
t = t.GetParent()
for t != nil {
f := []Tree{t}
ancestors = append(f, ancestors...)
t = t.GetParent()
}
return ancestors
} | func (tr *Tree) GetSelectedNodes() []Treer {
if tr.Root == nil {
return nil
}
rn := tr.Root.AsCoreTree()
if len(rn.SelectedNodes) == 0 {
return rn.SelectedNodes
}
return rn.SelectedNodes
} | 0.537872 | google/cel-go | vendor/github.com/antlr4-go/antlr/v4/trees.go | cogentcore/core | core/tree.go | BSD-3-Clause | go |
TreesgetAncestors returns a list of all ancestors of this node. The first node of the list is the root
and the last node is the parent of this node.
goland:noinspection GoUnusedExportedFunction | func TreesgetAncestors(t Tree) []Tree {
ancestors := make([]Tree, 0)
t = t.GetParent()
for t != nil {
f := []Tree{t}
ancestors = append(f, ancestors...)
t = t.GetParent()
}
return ancestors
} | func childrenOf(n ast.Node) []ast.Node {
var children []ast.Node
// First add nodes for all true subtrees.
ast.Inspect(n, func(node ast.Node) bool {
if node == n { // push n
return true // recur
}
if node != nil { // push child
children = append(children, node)
}
return false // no recursion
})
// Then add fake Nodes for bare tokens.
switch n := n.(type) {
case *ast.ArrayType:
children = append(children,
tok(n.Lbrack, len("[")),
tok(n.Elt.End(), len("]")))
case *ast.AssignStmt:
children = append(children,
tok(n.TokPos, len(n.Tok.String())))
case *ast.BasicLit:
children = append(children,
tok(n.ValuePos, len(n.Value)))
case *ast.BinaryExpr:
children = append(children, tok(n.OpPos, len(n.Op.String())))
case *ast.BlockStmt:
children = append(children,
tok(n.Lbrace, len("{")),
tok(n.Rbrace, len("}")))
case *ast.BranchStmt:
children = append(children,
tok(n.TokPos, len(n.Tok.String())))
case *ast.CallExpr:
children = append(children,
tok(n.Lparen, len("(")),
tok(n.Rparen, len(")")))
if n.Ellipsis != 0 {
children = append(children, tok(n.Ellipsis, len("...")))
}
case *ast.CaseClause:
if n.List == nil {
children = append(children,
tok(n.Case, len("default")))
} else {
children = append(children,
tok(n.Case, len("case")))
}
children = append(children, tok(n.Colon, len(":")))
case *ast.ChanType:
switch n.Dir {
case ast.RECV:
children = append(children, tok(n.Begin, len("<-chan")))
case ast.SEND:
children = append(children, tok(n.Begin, len("chan<-")))
case ast.RECV | ast.SEND:
children = append(children, tok(n.Begin, len("chan")))
}
case *ast.CommClause:
if n.Comm == nil {
children = append(children,
tok(n.Case, len("default")))
} else {
children = append(children,
tok(n.Case, len("case")))
}
children = append(children, tok(n.Colon, len(":")))
case *ast.Comment:
// nop
case *ast.CommentGroup:
// nop
case *ast.CompositeLit:
children = append(children,
tok(n.Lbrace, len("{")),
tok(n.Rbrace, len("{")))
case *ast.DeclStmt:
// nop
case *ast.DeferStmt:
children = append(children,
tok(n.Defer, len("defer")))
case *ast.Ellipsis:
children = append(children,
tok(n.Ellipsis, len("...")))
case *ast.EmptyStmt:
// nop
case *ast.ExprStmt:
// nop
case *ast.Field:
// TODO(adonovan): Field.{Doc,Comment,Tag}?
case *ast.FieldList:
children = append(children,
tok(n.Opening, len("(")), // or len("[")
tok(n.Closing, len(")"))) // or len("]")
case *ast.File:
// TODO test: Doc
children = append(children,
tok(n.Package, len("package")))
case *ast.ForStmt:
children = append(children,
tok(n.For, len("for")))
case *ast.FuncDecl:
// TODO(adonovan): FuncDecl.Comment?
// Uniquely, FuncDecl breaks the invariant that
// preorder traversal yields tokens in lexical order:
// in fact, FuncDecl.Recv precedes FuncDecl.Type.Func.
//
// As a workaround, we inline the case for FuncType
// here and order things correctly.
//
children = nil // discard ast.Walk(FuncDecl) info subtrees
children = append(children, tok(n.Type.Func, len("func")))
if n.Recv != nil {
children = append(children, n.Recv)
}
children = append(children, n.Name)
if tparams := typeparams.ForFuncType(n.Type); tparams != nil {
children = append(children, tparams)
}
if n.Type.Params != nil {
children = append(children, n.Type.Params)
}
if n.Type.Results != nil {
children = append(children, n.Type.Results)
}
if n.Body != nil {
children = append(children, n.Body)
}
case *ast.FuncLit:
// nop
case *ast.FuncType:
if n.Func != 0 {
children = append(children,
tok(n.Func, len("func")))
}
case *ast.GenDecl:
children = append(children,
tok(n.TokPos, len(n.Tok.String())))
if n.Lparen != 0 {
children = append(children,
tok(n.Lparen, len("(")),
tok(n.Rparen, len(")")))
}
case *ast.GoStmt:
children = append(children,
tok(n.Go, len("go")))
case *ast.Ident:
children = append(children,
tok(n.NamePos, len(n.Name)))
case *ast.IfStmt:
children = append(children,
tok(n.If, len("if")))
case *ast.ImportSpec:
// TODO(adonovan): ImportSpec.{Doc,EndPos}?
case *ast.IncDecStmt:
children = append(children,
tok(n.TokPos, len(n.Tok.String())))
case *ast.IndexExpr:
children = append(children,
tok(n.Lbrack, len("[")),
tok(n.Rbrack, len("]")))
case *typeparams.IndexListExpr:
children = append(children,
tok(n.Lbrack, len("[")),
tok(n.Rbrack, len("]")))
case *ast.InterfaceType:
children = append(children,
tok(n.Interface, len("interface")))
case *ast.KeyValueExpr:
children = append(children,
tok(n.Colon, len(":")))
case *ast.LabeledStmt:
children = append(children,
tok(n.Colon, len(":")))
case *ast.MapType:
children = append(children,
tok(n.Map, len("map")))
case *ast.ParenExpr:
children = append(children,
tok(n.Lparen, len("(")),
tok(n.Rparen, len(")")))
case *ast.RangeStmt:
children = append(children,
tok(n.For, len("for")),
tok(n.TokPos, len(n.Tok.String())))
case *ast.ReturnStmt:
children = append(children,
tok(n.Return, len("return")))
case *ast.SelectStmt:
children = append(children,
tok(n.Select, len("select")))
case *ast.SelectorExpr:
// nop
case *ast.SendStmt:
children = append(children,
tok(n.Arrow, len("<-")))
case *ast.SliceExpr:
children = append(children,
tok(n.Lbrack, len("[")),
tok(n.Rbrack, len("]")))
case *ast.StarExpr:
children = append(children, tok(n.Star, len("*")))
case *ast.StructType:
children = append(children, tok(n.Struct, len("struct")))
case *ast.SwitchStmt:
children = append(children, tok(n.Switch, len("switch")))
case *ast.TypeAssertExpr:
children = append(children,
tok(n.Lparen-1, len(".")),
tok(n.Lparen, len("(")),
tok(n.Rparen, len(")")))
case *ast.TypeSpec:
// TODO(adonovan): TypeSpec.{Doc,Comment}?
case *ast.TypeSwitchStmt:
children = append(children, tok(n.Switch, len("switch")))
case *ast.UnaryExpr:
children = append(children, tok(n.OpPos, len(n.Op.String())))
case *ast.ValueSpec:
// TODO(adonovan): ValueSpec.{Doc,Comment}?
case *ast.BadDecl, *ast.BadExpr, *ast.BadStmt:
// nop
} | 0.529737 | google/cel-go | vendor/github.com/antlr4-go/antlr/v4/trees.go | LockGit/gochat | vendor/golang.org/x/tools/go/ast/astutil/enclosing.go | MIT | go |
WithMatchLabelExpressions adds the given value to the MatchLabelExpressions field in the declarative configuration
and returns the receiver, so that objects can be built by chaining "With" function invocations.
If called multiple times, values provided by each call will be appended to the MatchLabelExpressions field. | func (b *TopologySelectorTermApplyConfiguration) WithMatchLabelExpressions(values ...*TopologySelectorLabelRequirementApplyConfiguration) *TopologySelectorTermApplyConfiguration {
for i := range values {
if values[i] == nil {
panic("nil value passed to WithMatchLabelExpressions")
}
b.MatchLabelExpressions = append(b.MatchLabelExpressions, *values[i])
}
return b
} | func (b *LabelSelectorApplyConfiguration) WithMatchExpressions(values ...*LabelSelectorRequirementApplyConfiguration) *LabelSelectorApplyConfiguration {
for i := range values {
if values[i] == nil {
panic("nil value passed to WithMatchExpressions")
}
b.MatchExpressions = append(b.MatchExpressions, *values[i])
}
return b
} | 0.873758 | devtron-labs/silver-surfer | vendor/k8s.io/client-go/applyconfigurations/core/v1/topologyselectorterm.go | devtron-labs/silver-surfer | vendor/k8s.io/client-go/applyconfigurations/meta/v1/labelselector.go | Apache-2.0 | go |
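The rows above come from Kubernetes' generated apply-configuration builders, which are designed to be chained. The sketch below assumes the generated constructor functions (TopologySelectorTerm, TopologySelectorLabelRequirement) and the WithKey/WithValues methods produced by the same code generator; the zone key and values are made up for illustration.

```go
package main

import (
	"fmt"

	corev1 "k8s.io/client-go/applyconfigurations/core/v1"
)

func main() {
	// Chain the builders: each With* call appends or sets a field and
	// returns the receiver, so the whole term can be built in one expression.
	term := corev1.TopologySelectorTerm().
		WithMatchLabelExpressions(
			corev1.TopologySelectorLabelRequirement().
				WithKey("topology.kubernetes.io/zone").
				WithValues("us-east-1a", "us-east-1b"),
		)

	fmt.Printf("%+v\n", *term)
}
```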
WithMatchLabelExpressions adds the given value to the MatchLabelExpressions field in the declarative configuration
and returns the receiver, so that objects can be built by chaining "With" function invocations.
If called multiple times, values provided by each call will be appended to the MatchLabelExpressions field. | func (b *TopologySelectorTermApplyConfiguration) WithMatchLabelExpressions(values ...*TopologySelectorLabelRequirementApplyConfiguration) *TopologySelectorTermApplyConfiguration {
for i := range values {
if values[i] == nil {
panic("nil value passed to WithMatchLabelExpressions")
}
b.MatchLabelExpressions = append(b.MatchLabelExpressions, *values[i])
}
return b
} | func (b *PodAffinityTermApplyConfiguration) WithMatchLabelKeys(values ...string) *PodAffinityTermApplyConfiguration {
for i := range values {
b.MatchLabelKeys = append(b.MatchLabelKeys, values[i])
}
return b
} | 0.860895 | devtron-labs/silver-surfer | vendor/k8s.io/client-go/applyconfigurations/core/v1/topologyselectorterm.go | tektoncd/cli | vendor/k8s.io/client-go/applyconfigurations/core/v1/podaffinityterm.go | Apache-2.0 | go |
WithMatchLabelExpressions adds the given value to the MatchLabelExpressions field in the declarative configuration
and returns the receiver, so that objects can be built by chaining "With" function invocations.
If called multiple times, values provided by each call will be appended to the MatchLabelExpressions field. | func (b *TopologySelectorTermApplyConfiguration) WithMatchLabelExpressions(values ...*TopologySelectorLabelRequirementApplyConfiguration) *TopologySelectorTermApplyConfiguration {
for i := range values {
if values[i] == nil {
panic("nil value passed to WithMatchLabelExpressions")
}
b.MatchLabelExpressions = append(b.MatchLabelExpressions, *values[i])
}
return b
} | func (b *LabelSelectorApplyConfiguration) WithMatchLabels(entries map[string]string) *LabelSelectorApplyConfiguration {
if b.MatchLabels == nil && len(entries) > 0 {
b.MatchLabels = make(map[string]string, len(entries))
}
for k, v := range entries {
b.MatchLabels[k] = v
}
return b
} | 0.76443 | devtron-labs/silver-surfer | vendor/k8s.io/client-go/applyconfigurations/core/v1/topologyselectorterm.go | devtron-labs/silver-surfer | vendor/k8s.io/client-go/applyconfigurations/meta/v1/labelselector.go | Apache-2.0 | go |
WithMatchLabelExpressions adds the given value to the MatchLabelExpressions field in the declarative configuration
and returns the receiver, so that objects can be built by chaining "With" function invocations.
If called multiple times, values provided by each call will be appended to the MatchLabelExpressions field. | func (b *TopologySelectorTermApplyConfiguration) WithMatchLabelExpressions(values ...*TopologySelectorLabelRequirementApplyConfiguration) *TopologySelectorTermApplyConfiguration {
for i := range values {
if values[i] == nil {
panic("nil value passed to WithMatchLabelExpressions")
}
b.MatchLabelExpressions = append(b.MatchLabelExpressions, *values[i])
}
return b
} | func (b *NodeSelectorTermApplyConfiguration) WithMatchFields(values ...*NodeSelectorRequirementApplyConfiguration) *NodeSelectorTermApplyConfiguration {
for i := range values {
if values[i] == nil {
panic("nil value passed to WithMatchFields")
}
b.MatchFields = append(b.MatchFields, *values[i])
}
return b
} | 0.737585 | devtron-labs/silver-surfer | vendor/k8s.io/client-go/applyconfigurations/core/v1/topologyselectorterm.go | devtron-labs/silver-surfer | vendor/k8s.io/client-go/applyconfigurations/core/v1/nodeselectorterm.go | Apache-2.0 | go |
WithMatchLabelExpressions adds the given value to the MatchLabelExpressions field in the declarative configuration
and returns the receiver, so that objects can be built by chaining "With" function invocations.
If called multiple times, values provided by each call will be appended to the MatchLabelExpressions field. | func (b *TopologySelectorTermApplyConfiguration) WithMatchLabelExpressions(values ...*TopologySelectorLabelRequirementApplyConfiguration) *TopologySelectorTermApplyConfiguration {
for i := range values {
if values[i] == nil {
panic("nil value passed to WithMatchLabelExpressions")
}
b.MatchLabelExpressions = append(b.MatchLabelExpressions, *values[i])
}
return b
} | func (b *ValidatingWebhookApplyConfiguration) WithMatchConditions(values ...*MatchConditionApplyConfiguration) *ValidatingWebhookApplyConfiguration {
for i := range values {
if values[i] == nil {
panic("nil value passed to WithMatchConditions")
}
b.MatchConditions = append(b.MatchConditions, *values[i])
}
return b
} | 0.728306 | devtron-labs/silver-surfer | vendor/k8s.io/client-go/applyconfigurations/core/v1/topologyselectorterm.go | tektoncd/cli | vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validatingwebhook.go | Apache-2.0 | go |
DeleteVoiceMessageSpendLimitOverrideRequest generates a "aws/request.Request" representing the
client's request for the DeleteVoiceMessageSpendLimitOverride operation. The "output" return
value will be populated with the request's response once the request completes
successfully.
Use "Send" method on the returned Request to send the API call to the service.
the "output" return value is not valid until after Send returns without error.
See DeleteVoiceMessageSpendLimitOverride for more information on using the DeleteVoiceMessageSpendLimitOverride
API call, and error handling.
This method is useful when you want to inject custom logic or configuration
into the SDK's request lifecycle. Such as custom headers, or retry logic.
// Example sending a request using the DeleteVoiceMessageSpendLimitOverrideRequest method.
req, resp := client.DeleteVoiceMessageSpendLimitOverrideRequest(params)
err := req.Send()
if err == nil { // resp is now filled
fmt.Println(resp)
}
See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-sms-voice-v2-2022-03-31/DeleteVoiceMessageSpendLimitOverride | func (c *PinpointSMSVoiceV2) DeleteVoiceMessageSpendLimitOverrideRequest(input *DeleteVoiceMessageSpendLimitOverrideInput) (req *request.Request, output *DeleteVoiceMessageSpendLimitOverrideOutput) {
op := &request.Operation{
Name: opDeleteVoiceMessageSpendLimitOverride,
HTTPMethod: "POST",
HTTPPath: "/",
}
if input == nil {
input = &DeleteVoiceMessageSpendLimitOverrideInput{}
}
output = &DeleteVoiceMessageSpendLimitOverrideOutput{}
req = c.newRequest(op, input, output)
return
} | func (c *PinpointSMSVoiceV2) DeleteVoiceMessageSpendLimitOverrideWithContext(ctx aws.Context, input *DeleteVoiceMessageSpendLimitOverrideInput, opts ...request.Option) (*DeleteVoiceMessageSpendLimitOverrideOutput, error) {
req, out := c.DeleteVoiceMessageSpendLimitOverrideRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
} | 0.881435 | aws/aws-sdk-go | service/pinpointsmsvoicev2/api.go | aws/aws-sdk-go | service/pinpointsmsvoicev2/api.go | Apache-2.0 | go |
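This row pairs the plain request constructor with its context-aware wrapper, which is the other common way to invoke the same operation. The hedged sketch below shows the WithContext form; the session setup and the empty input are assumptions, and a context carrying a deadline could be substituted for context.Background().

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/pinpointsmsvoicev2"
)

func main() {
	// Assumed setup: default shared-config session and a Pinpoint SMS Voice v2 client.
	sess := session.Must(session.NewSession())
	client := pinpointsmsvoicev2.New(sess)

	// The WithContext variant builds and sends the same request, but honors
	// cancellation and deadlines carried by ctx.
	ctx := context.Background()
	out, err := client.DeleteVoiceMessageSpendLimitOverrideWithContext(ctx,
		&pinpointsmsvoicev2.DeleteVoiceMessageSpendLimitOverrideInput{})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(out)
}
```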
DeleteVoiceMessageSpendLimitOverrideRequest generates a "aws/request.Request" representing the
client's request for the DeleteVoiceMessageSpendLimitOverride operation. The "output" return
value will be populated with the request's response once the request completes
successfully.
Use "Send" method on the returned Request to send the API call to the service.
the "output" return value is not valid until after Send returns without error.
See DeleteVoiceMessageSpendLimitOverride for more information on using the DeleteVoiceMessageSpendLimitOverride
API call, and error handling.
This method is useful when you want to inject custom logic or configuration
into the SDK's request lifecycle. Such as custom headers, or retry logic.
// Example sending a request using the DeleteVoiceMessageSpendLimitOverrideRequest method.
req, resp := client.DeleteVoiceMessageSpendLimitOverrideRequest(params)
err := req.Send()
if err == nil { // resp is now filled
fmt.Println(resp)
}
See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-sms-voice-v2-2022-03-31/DeleteVoiceMessageSpendLimitOverride | func (c *PinpointSMSVoiceV2) DeleteVoiceMessageSpendLimitOverrideRequest(input *DeleteVoiceMessageSpendLimitOverrideInput) (req *request.Request, output *DeleteVoiceMessageSpendLimitOverrideOutput) {
op := &request.Operation{
Name: opDeleteVoiceMessageSpendLimitOverride,
HTTPMethod: "POST",
HTTPPath: "/",
}
if input == nil {
input = &DeleteVoiceMessageSpendLimitOverrideInput{}
}
output = &DeleteVoiceMessageSpendLimitOverrideOutput{}
req = c.newRequest(op, input, output)
return
} | func (c *PinpointSMSVoiceV2) SetVoiceMessageSpendLimitOverrideRequest(input *SetVoiceMessageSpendLimitOverrideInput) (req *request.Request, output *SetVoiceMessageSpendLimitOverrideOutput) {
op := &request.Operation{
Name: opSetVoiceMessageSpendLimitOverride,
HTTPMethod: "POST",
HTTPPath: "/",
}
if input == nil {
input = &SetVoiceMessageSpendLimitOverrideInput{}
}
output = &SetVoiceMessageSpendLimitOverrideOutput{}
req = c.newRequest(op, input, output)
return
} | 0.881233 | aws/aws-sdk-go | service/pinpointsmsvoicev2/api.go | aws/aws-sdk-go | service/pinpointsmsvoicev2/api.go | Apache-2.0 | go |
DeleteVoiceMessageSpendLimitOverrideRequest generates a "aws/request.Request" representing the
client's request for the DeleteVoiceMessageSpendLimitOverride operation. The "output" return
value will be populated with the request's response once the request completes
successfully.
Use "Send" method on the returned Request to send the API call to the service.
the "output" return value is not valid until after Send returns without error.
See DeleteVoiceMessageSpendLimitOverride for more information on using the DeleteVoiceMessageSpendLimitOverride
API call, and error handling.
This method is useful when you want to inject custom logic or configuration
into the SDK's request lifecycle. Such as custom headers, or retry logic.
// Example sending a request using the DeleteVoiceMessageSpendLimitOverrideRequest method.
req, resp := client.DeleteVoiceMessageSpendLimitOverrideRequest(params)
err := req.Send()
if err == nil { // resp is now filled
fmt.Println(resp)
}
See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-sms-voice-v2-2022-03-31/DeleteVoiceMessageSpendLimitOverride | func (c *PinpointSMSVoiceV2) DeleteVoiceMessageSpendLimitOverrideRequest(input *DeleteVoiceMessageSpendLimitOverrideInput) (req *request.Request, output *DeleteVoiceMessageSpendLimitOverrideOutput) {
op := &request.Operation{
Name: opDeleteVoiceMessageSpendLimitOverride,
HTTPMethod: "POST",
HTTPPath: "/",
}
if input == nil {
input = &DeleteVoiceMessageSpendLimitOverrideInput{}
}
output = &DeleteVoiceMessageSpendLimitOverrideOutput{}
req = c.newRequest(op, input, output)
return
} | func (c *PinpointSMSVoiceV2) DeleteTextMessageSpendLimitOverrideRequest(input *DeleteTextMessageSpendLimitOverrideInput) (req *request.Request, output *DeleteTextMessageSpendLimitOverrideOutput) {
op := &request.Operation{
Name: opDeleteTextMessageSpendLimitOverride,
HTTPMethod: "POST",
HTTPPath: "/",
}
if input == nil {
input = &DeleteTextMessageSpendLimitOverrideInput{}
}
output = &DeleteTextMessageSpendLimitOverrideOutput{}
req = c.newRequest(op, input, output)
return
} | 0.870349 | aws/aws-sdk-go | service/pinpointsmsvoicev2/api.go | aws/aws-sdk-go | service/pinpointsmsvoicev2/api.go | Apache-2.0 | go |
DeleteVoiceMessageSpendLimitOverrideRequest generates a "aws/request.Request" representing the
client's request for the DeleteVoiceMessageSpendLimitOverride operation. The "output" return
value will be populated with the request's response once the request completes
successfully.
Use "Send" method on the returned Request to send the API call to the service.
the "output" return value is not valid until after Send returns without error.
See DeleteVoiceMessageSpendLimitOverride for more information on using the DeleteVoiceMessageSpendLimitOverride
API call, and error handling.
This method is useful when you want to inject custom logic or configuration
into the SDK's request lifecycle. Such as custom headers, or retry logic.
// Example sending a request using the DeleteVoiceMessageSpendLimitOverrideRequest method.
req, resp := client.DeleteVoiceMessageSpendLimitOverrideRequest(params)
err := req.Send()
if err == nil { // resp is now filled
fmt.Println(resp)
}
See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-sms-voice-v2-2022-03-31/DeleteVoiceMessageSpendLimitOverride | func (c *PinpointSMSVoiceV2) DeleteVoiceMessageSpendLimitOverrideRequest(input *DeleteVoiceMessageSpendLimitOverrideInput) (req *request.Request, output *DeleteVoiceMessageSpendLimitOverrideOutput) {
op := &request.Operation{
Name: opDeleteVoiceMessageSpendLimitOverride,
HTTPMethod: "POST",
HTTPPath: "/",
}
if input == nil {
input = &DeleteVoiceMessageSpendLimitOverrideInput{}
}
output = &DeleteVoiceMessageSpendLimitOverrideOutput{}
req = c.newRequest(op, input, output)
return
} | func (c *PinpointSMSVoiceV2) DeleteVoiceMessageSpendLimitOverride(input *DeleteVoiceMessageSpendLimitOverrideInput) (*DeleteVoiceMessageSpendLimitOverrideOutput, error) {
req, out := c.DeleteVoiceMessageSpendLimitOverrideRequest(input)
return out, req.Send()
} | 0.811373 | aws/aws-sdk-go | service/pinpointsmsvoicev2/api.go | aws/aws-sdk-go | service/pinpointsmsvoicev2/api.go | Apache-2.0 | go |
DeleteVoiceMessageSpendLimitOverrideRequest generates a "aws/request.Request" representing the
client's request for the DeleteVoiceMessageSpendLimitOverride operation. The "output" return
value will be populated with the request's response once the request completes
successfully.
Use "Send" method on the returned Request to send the API call to the service.
the "output" return value is not valid until after Send returns without error.
See DeleteVoiceMessageSpendLimitOverride for more information on using the DeleteVoiceMessageSpendLimitOverride
API call, and error handling.
This method is useful when you want to inject custom logic or configuration
into the SDK's request lifecycle. Such as custom headers, or retry logic.
// Example sending a request using the DeleteVoiceMessageSpendLimitOverrideRequest method.
req, resp := client.DeleteVoiceMessageSpendLimitOverrideRequest(params)
err := req.Send()
if err == nil { // resp is now filled
fmt.Println(resp)
}
See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-sms-voice-v2-2022-03-31/DeleteVoiceMessageSpendLimitOverride | func (c *PinpointSMSVoiceV2) DeleteVoiceMessageSpendLimitOverrideRequest(input *DeleteVoiceMessageSpendLimitOverrideInput) (req *request.Request, output *DeleteVoiceMessageSpendLimitOverrideOutput) {
op := &request.Operation{
Name: opDeleteVoiceMessageSpendLimitOverride,
HTTPMethod: "POST",
HTTPPath: "/",
}
if input == nil {
input = &DeleteVoiceMessageSpendLimitOverrideInput{}
}
output = &DeleteVoiceMessageSpendLimitOverrideOutput{}
req = c.newRequest(op, input, output)
return
} | func (c *PinpointSMSVoiceV2) SetVoiceMessageSpendLimitOverrideWithContext(ctx aws.Context, input *SetVoiceMessageSpendLimitOverrideInput, opts ...request.Option) (*SetVoiceMessageSpendLimitOverrideOutput, error) {
req, out := c.SetVoiceMessageSpendLimitOverrideRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
} | 0.771914 | aws/aws-sdk-go | service/pinpointsmsvoicev2/api.go | aws/aws-sdk-go | service/pinpointsmsvoicev2/api.go | Apache-2.0 | go |
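The five rows above pair the same generated DeleteVoiceMessageSpendLimitOverrideRequest docstring with different negatives. As a minimal sketch of the Request/Send pattern that docstring describes, assuming standard aws-sdk-go v1 setup with region and credentials taken from the environment, the returned request can be customized (here with an illustrative tracing header that is not part of the service API) before Send is called:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/pinpointsmsvoicev2"
)

func main() {
	// Region and credentials are assumed to come from the environment or
	// shared config; session.Must panics if the session cannot be built.
	sess := session.Must(session.NewSession())
	client := pinpointsmsvoicev2.New(sess)

	// Build the request without sending it yet.
	req, resp := client.DeleteVoiceMessageSpendLimitOverrideRequest(
		&pinpointsmsvoicev2.DeleteVoiceMessageSpendLimitOverrideInput{})

	// Customize the request before it goes out, as the docstring suggests
	// (here an illustrative tracing header, not part of the service API).
	req.HTTPRequest.Header.Set("X-Trace-Id", "example-trace-id")

	if err := req.Send(); err != nil {
		fmt.Println("request failed:", err)
		return
	}
	// resp is only populated after Send returns without error.
	fmt.Println(resp)
}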
C documentation
/*
** A constraint has failed while inserting a row into an rtree table.
** Assuming no OOM error occurs, this function sets the error message
** (at pRtree->base.zErrMsg) to an appropriate value and returns
** SQLITE_CONSTRAINT.
**
** Parameter iCol is the index of the leftmost column involved in the
** constraint failure. If it is 0, then the constraint that failed is
** the unique constraint on the id column. Otherwise, it is the rtree
** (c1<=c2) constraint on columns iCol and iCol+1 that has failed.
**
** If an OOM occurs, SQLITE_NOMEM is returned instead of SQLITE_CONSTRAINT.
*/ | func _rtreeConstraintError(tls *libc.TLS, pRtree uintptr, iCol int32) (r int32) {
bp := tls.Alloc(48)
defer tls.Free(48)
var rc, v1 int32
var zCol, zCol1, zCol2, zSql uintptr
var _ /* pStmt at bp+0 */ uintptr
_, _, _, _, _, _ = rc, zCol, zCol1, zCol2, zSql, v1
*(*uintptr)(unsafe.Pointer(bp)) = uintptr(0)
zSql = Xsqlite3_mprintf(tls, __ccgo_ts+26102, libc.VaList(bp+16, (*TRtree)(unsafe.Pointer(pRtree)).FzDb, (*TRtree)(unsafe.Pointer(pRtree)).FzName))
if zSql != 0 {
rc = Xsqlite3_prepare_v2(tls, (*TRtree)(unsafe.Pointer(pRtree)).Fdb, zSql, -int32(1), bp, uintptr(0))
} else {
rc = int32(SQLITE_NOMEM)
}
Xsqlite3_free(tls, zSql)
if rc == SQLITE_OK {
if iCol == 0 {
zCol = Xsqlite3_column_name(tls, *(*uintptr)(unsafe.Pointer(bp)), 0)
(*TRtree)(unsafe.Pointer(pRtree)).Fbase.FzErrMsg = Xsqlite3_mprintf(tls, __ccgo_ts+26122, libc.VaList(bp+16, (*TRtree)(unsafe.Pointer(pRtree)).FzName, zCol))
} else {
zCol1 = Xsqlite3_column_name(tls, *(*uintptr)(unsafe.Pointer(bp)), iCol)
zCol2 = Xsqlite3_column_name(tls, *(*uintptr)(unsafe.Pointer(bp)), iCol+int32(1))
(*TRtree)(unsafe.Pointer(pRtree)).Fbase.FzErrMsg = Xsqlite3_mprintf(tls, __ccgo_ts+26154, libc.VaList(bp+16, (*TRtree)(unsafe.Pointer(pRtree)).FzName, zCol1, zCol2))
}
}
Xsqlite3_finalize(tls, *(*uintptr)(unsafe.Pointer(bp)))
if rc == SQLITE_OK {
v1 = int32(SQLITE_CONSTRAINT)
} else {
v1 = rc
}
return v1
} | func _sqlite3AddCheckConstraint(tls *libc.TLS, pParse uintptr, pCheckExpr uintptr, zStart uintptr, zEnd uintptr) {
bp := tls.Alloc(16)
defer tls.Free(16)
var db, pTab uintptr
var _ /* t at bp+0 */ TToken
_, _ = db, pTab
pTab = (*TParse)(unsafe.Pointer(pParse)).FpNewTable
db = (*TParse)(unsafe.Pointer(pParse)).Fdb
if pTab != 0 && !(libc.Int32FromUint8((*TParse)(unsafe.Pointer(pParse)).FeParseMode) == libc.Int32FromInt32(PARSE_MODE_DECLARE_VTAB)) && !(_sqlite3BtreeIsReadonly(tls, (*(*TDb)(unsafe.Pointer((*Tsqlite3)(unsafe.Pointer(db)).FaDb + uintptr((*Tsqlite3)(unsafe.Pointer(db)).Finit1.FiDb)*32))).FpBt) != 0) {
(*TTable)(unsafe.Pointer(pTab)).FpCheck = _sqlite3ExprListAppend(tls, pParse, (*TTable)(unsafe.Pointer(pTab)).FpCheck, pCheckExpr)
if (*TParse)(unsafe.Pointer(pParse)).FconstraintName.Fn != 0 {
_sqlite3ExprListSetName(tls, pParse, (*TTable)(unsafe.Pointer(pTab)).FpCheck, pParse+112, int32(1))
} else {
zStart++
for {
if !(libc.Int32FromUint8(_sqlite3CtypeMap[libc.Uint8FromInt8(*(*int8)(unsafe.Pointer(zStart)))])&int32(0x01) != 0) {
break
}
goto _1
_1:
;
zStart++
}
for libc.Int32FromUint8(_sqlite3CtypeMap[libc.Uint8FromInt8(*(*int8)(unsafe.Pointer(zEnd + uintptr(-libc.Int32FromInt32(1)))))])&int32(0x01) != 0 {
zEnd--
}
(*(*TToken)(unsafe.Pointer(bp))).Fz = zStart
(*(*TToken)(unsafe.Pointer(bp))).Fn = libc.Uint32FromInt32(int32(int64(zEnd) - int64((*(*TToken)(unsafe.Pointer(bp))).Fz)))
_sqlite3ExprListSetName(tls, pParse, (*TTable)(unsafe.Pointer(pTab)).FpCheck, bp, int32(1))
}
} else {
_sqlite3ExprDelete(tls, (*TParse)(unsafe.Pointer(pParse)).Fdb, pCheckExpr)
}
} | 0.670493 | umputun/spot | vendor/modernc.org/sqlite/lib/sqlite_freebsd_amd64.go | umputun/spot | vendor/modernc.org/sqlite/lib/sqlite_freebsd_amd64.go | MIT | go |
C documentation
/*
** A constraint has failed while inserting a row into an rtree table.
** Assuming no OOM error occurs, this function sets the error message
** (at pRtree->base.zErrMsg) to an appropriate value and returns
** SQLITE_CONSTRAINT.
**
** Parameter iCol is the index of the leftmost column involved in the
** constraint failure. If it is 0, then the constraint that failed is
** the unique constraint on the id column. Otherwise, it is the rtree
** (c1<=c2) constraint on columns iCol and iCol+1 that has failed.
**
** If an OOM occurs, SQLITE_NOMEM is returned instead of SQLITE_CONSTRAINT.
*/ | func _rtreeConstraintError(tls *libc.TLS, pRtree uintptr, iCol int32) (r int32) {
bp := tls.Alloc(48)
defer tls.Free(48)
var rc, v1 int32
var zCol, zCol1, zCol2, zSql uintptr
var _ /* pStmt at bp+0 */ uintptr
_, _, _, _, _, _ = rc, zCol, zCol1, zCol2, zSql, v1
*(*uintptr)(unsafe.Pointer(bp)) = uintptr(0)
zSql = Xsqlite3_mprintf(tls, __ccgo_ts+26102, libc.VaList(bp+16, (*TRtree)(unsafe.Pointer(pRtree)).FzDb, (*TRtree)(unsafe.Pointer(pRtree)).FzName))
if zSql != 0 {
rc = Xsqlite3_prepare_v2(tls, (*TRtree)(unsafe.Pointer(pRtree)).Fdb, zSql, -int32(1), bp, uintptr(0))
} else {
rc = int32(SQLITE_NOMEM)
}
Xsqlite3_free(tls, zSql)
if rc == SQLITE_OK {
if iCol == 0 {
zCol = Xsqlite3_column_name(tls, *(*uintptr)(unsafe.Pointer(bp)), 0)
(*TRtree)(unsafe.Pointer(pRtree)).Fbase.FzErrMsg = Xsqlite3_mprintf(tls, __ccgo_ts+26122, libc.VaList(bp+16, (*TRtree)(unsafe.Pointer(pRtree)).FzName, zCol))
} else {
zCol1 = Xsqlite3_column_name(tls, *(*uintptr)(unsafe.Pointer(bp)), iCol)
zCol2 = Xsqlite3_column_name(tls, *(*uintptr)(unsafe.Pointer(bp)), iCol+int32(1))
(*TRtree)(unsafe.Pointer(pRtree)).Fbase.FzErrMsg = Xsqlite3_mprintf(tls, __ccgo_ts+26154, libc.VaList(bp+16, (*TRtree)(unsafe.Pointer(pRtree)).FzName, zCol1, zCol2))
}
}
Xsqlite3_finalize(tls, *(*uintptr)(unsafe.Pointer(bp)))
if rc == SQLITE_OK {
v1 = int32(SQLITE_CONSTRAINT)
} else {
v1 = rc
}
return v1
} | func _rtreeNonleafConstraint(tls *libc.TLS, p uintptr, eInt int32, pCellData uintptr, peWithin uintptr) {
bp := tls.Alloc(16)
defer tls.Free(16)
var val, v1, v2, v3, v4 Tsqlite3_rtree_dbl
var _ /* c at bp+0 */ TRtreeCoord
var _ /* c at bp+12 */ TRtreeCoord
var _ /* c at bp+4 */ TRtreeCoord
var _ /* c at bp+8 */ TRtreeCoord
_, _, _, _, _ = val, v1, v2, v3, v4 /* Coordinate value convert to a double */
/* p->iCoord might point to either a lower or upper bound coordinate
** in a coordinate pair. But make pCellData point to the lower bound.
*/
pCellData += uintptr(int32(8) + int32(4)*((*TRtreeConstraint)(unsafe.Pointer(p)).FiCoord&int32(0xfe)))
switch (*TRtreeConstraint)(unsafe.Pointer(p)).Fop {
case int32(RTREE_TRUE):
return /* Always satisfied */
case int32(RTREE_FALSE):
case int32(RTREE_EQ):
/* Coordinate decoded */ libc.Xmemcpy(tls, bp, pCellData, uint64(4))
*(*Tu32)(unsafe.Pointer(bp)) = *(*Tu32)(unsafe.Pointer(bp))>>libc.Int32FromInt32(24)&uint32(0xff) | *(*Tu32)(unsafe.Pointer(bp))>>libc.Int32FromInt32(8)&uint32(0xff00) | *(*Tu32)(unsafe.Pointer(bp))&uint32(0xff)<<int32(24) | *(*Tu32)(unsafe.Pointer(bp))&uint32(0xff00)<<int32(8)
if eInt != 0 {
v1 = float64(*(*int32)(unsafe.Pointer(bp)))
} else {
v1 = float64(*(*TRtreeValue)(unsafe.Pointer(bp)))
}
val = v1
/* val now holds the lower bound of the coordinate pair */
if *(*TRtreeDValue)(unsafe.Pointer(p + 8)) >= val {
pCellData += uintptr(4)
/* Coordinate decoded */ libc.Xmemcpy(tls, bp+4, pCellData, uint64(4))
*(*Tu32)(unsafe.Pointer(bp + 4)) = *(*Tu32)(unsafe.Pointer(bp + 4))>>libc.Int32FromInt32(24)&uint32(0xff) | *(*Tu32)(unsafe.Pointer(bp + 4))>>libc.Int32FromInt32(8)&uint32(0xff00) | *(*Tu32)(unsafe.Pointer(bp + 4))&uint32(0xff)<<int32(24) | *(*Tu32)(unsafe.Pointer(bp + 4))&uint32(0xff00)<<int32(8)
if eInt != 0 {
v2 = float64(*(*int32)(unsafe.Pointer(bp + 4)))
} else {
v2 = float64(*(*TRtreeValue)(unsafe.Pointer(bp + 4)))
}
val = v2
/* val now holds the upper bound of the coordinate pair */
if *(*TRtreeDValue)(unsafe.Pointer(p + 8)) <= val {
return
}
}
case int32(RTREE_LE):
fallthrough
case int32(RTREE_LT):
/* Coordinate decoded */ libc.Xmemcpy(tls, bp+8, pCellData, uint64(4))
*(*Tu32)(unsafe.Pointer(bp + 8)) = *(*Tu32)(unsafe.Pointer(bp + 8))>>libc.Int32FromInt32(24)&uint32(0xff) | *(*Tu32)(unsafe.Pointer(bp + 8))>>libc.Int32FromInt32(8)&uint32(0xff00) | *(*Tu32)(unsafe.Pointer(bp + 8))&uint32(0xff)<<int32(24) | *(*Tu32)(unsafe.Pointer(bp + 8))&uint32(0xff00)<<int32(8)
if eInt != 0 {
v3 = float64(*(*int32)(unsafe.Pointer(bp + 8)))
} else {
v3 = float64(*(*TRtreeValue)(unsafe.Pointer(bp + 8)))
}
val = v3
/* val now holds the lower bound of the coordinate pair */
if *(*TRtreeDValue)(unsafe.Pointer(p + 8)) >= val {
return
}
default:
pCellData += uintptr(4)
/* Coordinate decoded */ libc.Xmemcpy(tls, bp+12, pCellData, uint64(4))
*(*Tu32)(unsafe.Pointer(bp + 12)) = *(*Tu32)(unsafe.Pointer(bp + 12))>>libc.Int32FromInt32(24)&uint32(0xff) | *(*Tu32)(unsafe.Pointer(bp + 12))>>libc.Int32FromInt32(8)&uint32(0xff00) | *(*Tu32)(unsafe.Pointer(bp + 12))&uint32(0xff)<<int32(24) | *(*Tu32)(unsafe.Pointer(bp + 12))&uint32(0xff00)<<int32(8)
if eInt != 0 {
v4 = float64(*(*int32)(unsafe.Pointer(bp + 12)))
} else {
v4 = float64(*(*TRtreeValue)(unsafe.Pointer(bp + 12)))
}
val = v4
/* val now holds the upper bound of the coordinate pair */
if *(*TRtreeDValue)(unsafe.Pointer(p + 8)) <= val {
return
}
break
}
*(*int32)(unsafe.Pointer(peWithin)) = NOT_WITHIN
} | 0.640225 | umputun/spot | vendor/modernc.org/sqlite/lib/sqlite_freebsd_amd64.go | umputun/spot | vendor/modernc.org/sqlite/lib/sqlite_freebsd_amd64.go | MIT | go |
C documentation
/*
** A constraint has failed while inserting a row into an rtree table.
** Assuming no OOM error occurs, this function sets the error message
** (at pRtree->base.zErrMsg) to an appropriate value and returns
** SQLITE_CONSTRAINT.
**
** Parameter iCol is the index of the leftmost column involved in the
** constraint failure. If it is 0, then the constraint that failed is
** the unique constraint on the id column. Otherwise, it is the rtree
** (c1<=c2) constraint on columns iCol and iCol+1 that has failed.
**
** If an OOM occurs, SQLITE_NOMEM is returned instead of SQLITE_CONSTRAINT.
*/ | func _rtreeConstraintError(tls *libc.TLS, pRtree uintptr, iCol int32) (r int32) {
bp := tls.Alloc(48)
defer tls.Free(48)
var rc, v1 int32
var zCol, zCol1, zCol2, zSql uintptr
var _ /* pStmt at bp+0 */ uintptr
_, _, _, _, _, _ = rc, zCol, zCol1, zCol2, zSql, v1
*(*uintptr)(unsafe.Pointer(bp)) = uintptr(0)
zSql = Xsqlite3_mprintf(tls, __ccgo_ts+26102, libc.VaList(bp+16, (*TRtree)(unsafe.Pointer(pRtree)).FzDb, (*TRtree)(unsafe.Pointer(pRtree)).FzName))
if zSql != 0 {
rc = Xsqlite3_prepare_v2(tls, (*TRtree)(unsafe.Pointer(pRtree)).Fdb, zSql, -int32(1), bp, uintptr(0))
} else {
rc = int32(SQLITE_NOMEM)
}
Xsqlite3_free(tls, zSql)
if rc == SQLITE_OK {
if iCol == 0 {
zCol = Xsqlite3_column_name(tls, *(*uintptr)(unsafe.Pointer(bp)), 0)
(*TRtree)(unsafe.Pointer(pRtree)).Fbase.FzErrMsg = Xsqlite3_mprintf(tls, __ccgo_ts+26122, libc.VaList(bp+16, (*TRtree)(unsafe.Pointer(pRtree)).FzName, zCol))
} else {
zCol1 = Xsqlite3_column_name(tls, *(*uintptr)(unsafe.Pointer(bp)), iCol)
zCol2 = Xsqlite3_column_name(tls, *(*uintptr)(unsafe.Pointer(bp)), iCol+int32(1))
(*TRtree)(unsafe.Pointer(pRtree)).Fbase.FzErrMsg = Xsqlite3_mprintf(tls, __ccgo_ts+26154, libc.VaList(bp+16, (*TRtree)(unsafe.Pointer(pRtree)).FzName, zCol1, zCol2))
}
}
Xsqlite3_finalize(tls, *(*uintptr)(unsafe.Pointer(bp)))
if rc == SQLITE_OK {
v1 = int32(SQLITE_CONSTRAINT)
} else {
v1 = rc
}
return v1
} | func _jsonEachBestIndex(tls *libc.TLS, tab uintptr, pIdxInfo uintptr) (r int32) {
var aIdx [2]int32
var i, iCol, iMask, idxMask, unusableMask, v1 int32
var pConstraint uintptr
_, _, _, _, _, _, _, _ = aIdx, i, iCol, iMask, idxMask, pConstraint, unusableMask, v1 /* Index of constraints for JSON and ROOT */
unusableMask = 0 /* Mask of unusable JSON and ROOT constraints */
idxMask = 0
/* This implementation assumes that JSON and ROOT are the last two
** columns in the table */
_ = tab
v1 = -libc.Int32FromInt32(1)
aIdx[int32(1)] = v1
aIdx[0] = v1
pConstraint = (*Tsqlite3_index_info)(unsafe.Pointer(pIdxInfo)).FaConstraint
i = 0
for {
if !(i < (*Tsqlite3_index_info)(unsafe.Pointer(pIdxInfo)).FnConstraint) {
break
}
if (*Tsqlite3_index_constraint)(unsafe.Pointer(pConstraint)).FiColumn < int32(JEACH_JSON) {
goto _2
}
iCol = (*Tsqlite3_index_constraint)(unsafe.Pointer(pConstraint)).FiColumn - int32(JEACH_JSON)
iMask = int32(1) << iCol
if libc.Int32FromUint8((*Tsqlite3_index_constraint)(unsafe.Pointer(pConstraint)).Fusable) == 0 {
unusableMask |= iMask
} else {
if libc.Int32FromUint8((*Tsqlite3_index_constraint)(unsafe.Pointer(pConstraint)).Fop) == int32(SQLITE_INDEX_CONSTRAINT_EQ) {
aIdx[iCol] = i
idxMask |= iMask
}
}
goto _2
_2:
;
i++
pConstraint += 12
}
if (*Tsqlite3_index_info)(unsafe.Pointer(pIdxInfo)).FnOrderBy > 0 && (*(*Tsqlite3_index_orderby)(unsafe.Pointer((*Tsqlite3_index_info)(unsafe.Pointer(pIdxInfo)).FaOrderBy))).FiColumn < 0 && libc.Int32FromUint8((*(*Tsqlite3_index_orderby)(unsafe.Pointer((*Tsqlite3_index_info)(unsafe.Pointer(pIdxInfo)).FaOrderBy))).Fdesc) == 0 {
(*Tsqlite3_index_info)(unsafe.Pointer(pIdxInfo)).ForderByConsumed = int32(1)
}
if unusableMask & ^idxMask != 0 {
/* If there are any unusable constraints on JSON or ROOT, then reject
** this entire plan */
return int32(SQLITE_CONSTRAINT)
}
if aIdx[0] < 0 {
/* No JSON input. Leave estimatedCost at the huge value that it was
** initialized to to discourage the query planner from selecting this
** plan. */
(*Tsqlite3_index_info)(unsafe.Pointer(pIdxInfo)).FidxNum = 0
} else {
(*Tsqlite3_index_info)(unsafe.Pointer(pIdxInfo)).FestimatedCost = float64(1)
i = aIdx[0]
(*(*Tsqlite3_index_constraint_usage)(unsafe.Pointer((*Tsqlite3_index_info)(unsafe.Pointer(pIdxInfo)).FaConstraintUsage + uintptr(i)*8))).FargvIndex = int32(1)
(*(*Tsqlite3_index_constraint_usage)(unsafe.Pointer((*Tsqlite3_index_info)(unsafe.Pointer(pIdxInfo)).FaConstraintUsage + uintptr(i)*8))).Fomit = uint8(1)
if aIdx[int32(1)] < 0 {
(*Tsqlite3_index_info)(unsafe.Pointer(pIdxInfo)).FidxNum = int32(1) /* Only JSON supplied. Plan 1 */
} else {
i = aIdx[int32(1)]
(*(*Tsqlite3_index_constraint_usage)(unsafe.Pointer((*Tsqlite3_index_info)(unsafe.Pointer(pIdxInfo)).FaConstraintUsage + uintptr(i)*8))).FargvIndex = int32(2)
(*(*Tsqlite3_index_constraint_usage)(unsafe.Pointer((*Tsqlite3_index_info)(unsafe.Pointer(pIdxInfo)).FaConstraintUsage + uintptr(i)*8))).Fomit = uint8(1)
(*Tsqlite3_index_info)(unsafe.Pointer(pIdxInfo)).FidxNum = int32(3) /* Both JSON and ROOT are supplied. Plan 3 */
}
}
return SQLITE_OK
} | 0.640012 | umputun/spot | vendor/modernc.org/sqlite/lib/sqlite_freebsd_amd64.go | umputun/spot | vendor/modernc.org/sqlite/lib/sqlite_freebsd_amd64.go | MIT | go |
C documentation
/*
** A constraint has failed while inserting a row into an rtree table.
** Assuming no OOM error occurs, this function sets the error message
** (at pRtree->base.zErrMsg) to an appropriate value and returns
** SQLITE_CONSTRAINT.
**
** Parameter iCol is the index of the leftmost column involved in the
** constraint failure. If it is 0, then the constraint that failed is
** the unique constraint on the id column. Otherwise, it is the rtree
** (c1<=c2) constraint on columns iCol and iCol+1 that has failed.
**
** If an OOM occurs, SQLITE_NOMEM is returned instead of SQLITE_CONSTRAINT.
*/ | func _rtreeConstraintError(tls *libc.TLS, pRtree uintptr, iCol int32) (r int32) {
bp := tls.Alloc(48)
defer tls.Free(48)
var rc, v1 int32
var zCol, zCol1, zCol2, zSql uintptr
var _ /* pStmt at bp+0 */ uintptr
_, _, _, _, _, _ = rc, zCol, zCol1, zCol2, zSql, v1
*(*uintptr)(unsafe.Pointer(bp)) = uintptr(0)
zSql = Xsqlite3_mprintf(tls, __ccgo_ts+26102, libc.VaList(bp+16, (*TRtree)(unsafe.Pointer(pRtree)).FzDb, (*TRtree)(unsafe.Pointer(pRtree)).FzName))
if zSql != 0 {
rc = Xsqlite3_prepare_v2(tls, (*TRtree)(unsafe.Pointer(pRtree)).Fdb, zSql, -int32(1), bp, uintptr(0))
} else {
rc = int32(SQLITE_NOMEM)
}
Xsqlite3_free(tls, zSql)
if rc == SQLITE_OK {
if iCol == 0 {
zCol = Xsqlite3_column_name(tls, *(*uintptr)(unsafe.Pointer(bp)), 0)
(*TRtree)(unsafe.Pointer(pRtree)).Fbase.FzErrMsg = Xsqlite3_mprintf(tls, __ccgo_ts+26122, libc.VaList(bp+16, (*TRtree)(unsafe.Pointer(pRtree)).FzName, zCol))
} else {
zCol1 = Xsqlite3_column_name(tls, *(*uintptr)(unsafe.Pointer(bp)), iCol)
zCol2 = Xsqlite3_column_name(tls, *(*uintptr)(unsafe.Pointer(bp)), iCol+int32(1))
(*TRtree)(unsafe.Pointer(pRtree)).Fbase.FzErrMsg = Xsqlite3_mprintf(tls, __ccgo_ts+26154, libc.VaList(bp+16, (*TRtree)(unsafe.Pointer(pRtree)).FzName, zCol1, zCol2))
}
}
Xsqlite3_finalize(tls, *(*uintptr)(unsafe.Pointer(bp)))
if rc == SQLITE_OK {
v1 = int32(SQLITE_CONSTRAINT)
} else {
v1 = rc
}
return v1
} | func _rtreeBestIndex(tls *libc.TLS, tab uintptr, pIdxInfo uintptr) (r int32) {
bp := tls.Alloc(48)
defer tls.Free(48)
var bMatch, iIdx, ii, jj, rc, v4, v5 int32
var doOmit, op Tu8
var nRow Ti64
var p, pRtree uintptr
var _ /* zIdxStr at bp+0 */ [41]int8
_, _, _, _, _, _, _, _, _, _, _, _ = bMatch, doOmit, iIdx, ii, jj, nRow, op, p, pRtree, rc, v4, v5
pRtree = tab
rc = SQLITE_OK
bMatch = 0 /* Estimated rows returned by this scan */
iIdx = 0
libc.Xmemset(tls, bp, 0, uint64(41))
/* Check if there exists a MATCH constraint - even an unusable one. If there
** is, do not consider the lookup-by-rowid plan as using such a plan would
** require the VDBE to evaluate the MATCH constraint, which is not currently
** possible. */
ii = 0
for {
if !(ii < (*Tsqlite3_index_info)(unsafe.Pointer(pIdxInfo)).FnConstraint) {
break
}
if libc.Int32FromUint8((*(*Tsqlite3_index_constraint)(unsafe.Pointer((*Tsqlite3_index_info)(unsafe.Pointer(pIdxInfo)).FaConstraint + uintptr(ii)*12))).Fop) == int32(SQLITE_INDEX_CONSTRAINT_MATCH) {
bMatch = int32(1)
}
goto _1
_1:
;
ii++
}
ii = 0
for {
if !(ii < (*Tsqlite3_index_info)(unsafe.Pointer(pIdxInfo)).FnConstraint && iIdx < libc.Int32FromUint64(libc.Uint64FromInt64(41)-libc.Uint64FromInt32(1))) {
break
}
p = (*Tsqlite3_index_info)(unsafe.Pointer(pIdxInfo)).FaConstraint + uintptr(ii)*12
if bMatch == 0 && (*Tsqlite3_index_constraint)(unsafe.Pointer(p)).Fusable != 0 && (*Tsqlite3_index_constraint)(unsafe.Pointer(p)).FiColumn <= 0 && libc.Int32FromUint8((*Tsqlite3_index_constraint)(unsafe.Pointer(p)).Fop) == int32(SQLITE_INDEX_CONSTRAINT_EQ) {
jj = 0
for {
if !(jj < ii) {
break
}
(*(*Tsqlite3_index_constraint_usage)(unsafe.Pointer((*Tsqlite3_index_info)(unsafe.Pointer(pIdxInfo)).FaConstraintUsage + uintptr(jj)*8))).FargvIndex = 0
(*(*Tsqlite3_index_constraint_usage)(unsafe.Pointer((*Tsqlite3_index_info)(unsafe.Pointer(pIdxInfo)).FaConstraintUsage + uintptr(jj)*8))).Fomit = uint8(0)
goto _3
_3:
;
jj++
}
(*Tsqlite3_index_info)(unsafe.Pointer(pIdxInfo)).FidxNum = int32(1)
(*(*Tsqlite3_index_constraint_usage)(unsafe.Pointer((*Tsqlite3_index_info)(unsafe.Pointer(pIdxInfo)).FaConstraintUsage + uintptr(ii)*8))).FargvIndex = int32(1)
(*(*Tsqlite3_index_constraint_usage)(unsafe.Pointer((*Tsqlite3_index_info)(unsafe.Pointer(pIdxInfo)).FaConstraintUsage + uintptr(jj)*8))).Fomit = uint8(1)
/* This strategy involves a two rowid lookups on an B-Tree structures
** and then a linear search of an R-Tree node. This should be
** considered almost as quick as a direct rowid lookup (for which
** sqlite uses an internal cost of 0.0). It is expected to return
** a single row.
*/
(*Tsqlite3_index_info)(unsafe.Pointer(pIdxInfo)).FestimatedCost = float64(30)
(*Tsqlite3_index_info)(unsafe.Pointer(pIdxInfo)).FestimatedRows = int64(1)
(*Tsqlite3_index_info)(unsafe.Pointer(pIdxInfo)).FidxFlags = int32(SQLITE_INDEX_SCAN_UNIQUE)
return SQLITE_OK
}
if (*Tsqlite3_index_constraint)(unsafe.Pointer(p)).Fusable != 0 && ((*Tsqlite3_index_constraint)(unsafe.Pointer(p)).FiColumn > 0 && (*Tsqlite3_index_constraint)(unsafe.Pointer(p)).FiColumn <= libc.Int32FromUint8((*TRtree)(unsafe.Pointer(pRtree)).FnDim2) || libc.Int32FromUint8((*Tsqlite3_index_constraint)(unsafe.Pointer(p)).Fop) == int32(SQLITE_INDEX_CONSTRAINT_MATCH)) {
doOmit = uint8(1)
switch libc.Int32FromUint8((*Tsqlite3_index_constraint)(unsafe.Pointer(p)).Fop) {
case int32(SQLITE_INDEX_CONSTRAINT_EQ):
op = uint8(RTREE_EQ)
doOmit = uint8(0)
case int32(SQLITE_INDEX_CONSTRAINT_GT):
op = uint8(RTREE_GT)
doOmit = uint8(0)
case int32(SQLITE_INDEX_CONSTRAINT_LE):
op = uint8(RTREE_LE)
case int32(SQLITE_INDEX_CONSTRAINT_LT):
op = uint8(RTREE_LT)
doOmit = uint8(0)
case int32(SQLITE_INDEX_CONSTRAINT_GE):
op = uint8(RTREE_GE)
case int32(SQLITE_INDEX_CONSTRAINT_MATCH):
op = uint8(RTREE_MATCH)
default:
op = uint8(0)
break
}
if op != 0 {
v4 = iIdx
iIdx++
(*(*[41]int8)(unsafe.Pointer(bp)))[v4] = libc.Int8FromUint8(op)
v5 = iIdx
iIdx++
(*(*[41]int8)(unsafe.Pointer(bp)))[v5] = int8((*Tsqlite3_index_constraint)(unsafe.Pointer(p)).FiColumn - libc.Int32FromInt32(1) + libc.Int32FromUint8('0'))
(*(*Tsqlite3_index_constraint_usage)(unsafe.Pointer((*Tsqlite3_index_info)(unsafe.Pointer(pIdxInfo)).FaConstraintUsage + uintptr(ii)*8))).FargvIndex = iIdx / int32(2)
(*(*Tsqlite3_index_constraint_usage)(unsafe.Pointer((*Tsqlite3_index_info)(unsafe.Pointer(pIdxInfo)).FaConstraintUsage + uintptr(ii)*8))).Fomit = doOmit
}
}
goto _2
_2:
;
ii++
}
(*Tsqlite3_index_info)(unsafe.Pointer(pIdxInfo)).FidxNum = int32(2)
(*Tsqlite3_index_info)(unsafe.Pointer(pIdxInfo)).FneedToFreeIdxStr = int32(1)
if iIdx > 0 {
(*Tsqlite3_index_info)(unsafe.Pointer(pIdxInfo)).FidxStr = Xsqlite3_malloc(tls, iIdx+int32(1))
if (*Tsqlite3_index_info)(unsafe.Pointer(pIdxInfo)).FidxStr == uintptr(0) {
return int32(SQLITE_NOMEM)
}
libc.Xmemcpy(tls, (*Tsqlite3_index_info)(unsafe.Pointer(pIdxInfo)).FidxStr, bp, libc.Uint64FromInt32(iIdx+int32(1)))
}
nRow = (*TRtree)(unsafe.Pointer(pRtree)).FnRowEst >> (iIdx / int32(2))
(*Tsqlite3_index_info)(unsafe.Pointer(pIdxInfo)).FestimatedCost = libc.Float64FromFloat64(6) * float64(nRow)
(*Tsqlite3_index_info)(unsafe.Pointer(pIdxInfo)).FestimatedRows = nRow
return rc
} | 0.639721 | umputun/spot | vendor/modernc.org/sqlite/lib/sqlite_freebsd_amd64.go | umputun/spot | vendor/modernc.org/sqlite/lib/sqlite_freebsd_amd64.go | MIT | go |
C documentation
/*
** A constraint has failed while inserting a row into an rtree table.
** Assuming no OOM error occurs, this function sets the error message
** (at pRtree->base.zErrMsg) to an appropriate value and returns
** SQLITE_CONSTRAINT.
**
** Parameter iCol is the index of the leftmost column involved in the
** constraint failure. If it is 0, then the constraint that failed is
** the unique constraint on the id column. Otherwise, it is the rtree
** (c1<=c2) constraint on columns iCol and iCol+1 that has failed.
**
** If an OOM occurs, SQLITE_NOMEM is returned instead of SQLITE_CONSTRAINT.
*/ | func _rtreeConstraintError(tls *libc.TLS, pRtree uintptr, iCol int32) (r int32) {
bp := tls.Alloc(48)
defer tls.Free(48)
var rc, v1 int32
var zCol, zCol1, zCol2, zSql uintptr
var _ /* pStmt at bp+0 */ uintptr
_, _, _, _, _, _ = rc, zCol, zCol1, zCol2, zSql, v1
*(*uintptr)(unsafe.Pointer(bp)) = uintptr(0)
zSql = Xsqlite3_mprintf(tls, __ccgo_ts+26102, libc.VaList(bp+16, (*TRtree)(unsafe.Pointer(pRtree)).FzDb, (*TRtree)(unsafe.Pointer(pRtree)).FzName))
if zSql != 0 {
rc = Xsqlite3_prepare_v2(tls, (*TRtree)(unsafe.Pointer(pRtree)).Fdb, zSql, -int32(1), bp, uintptr(0))
} else {
rc = int32(SQLITE_NOMEM)
}
Xsqlite3_free(tls, zSql)
if rc == SQLITE_OK {
if iCol == 0 {
zCol = Xsqlite3_column_name(tls, *(*uintptr)(unsafe.Pointer(bp)), 0)
(*TRtree)(unsafe.Pointer(pRtree)).Fbase.FzErrMsg = Xsqlite3_mprintf(tls, __ccgo_ts+26122, libc.VaList(bp+16, (*TRtree)(unsafe.Pointer(pRtree)).FzName, zCol))
} else {
zCol1 = Xsqlite3_column_name(tls, *(*uintptr)(unsafe.Pointer(bp)), iCol)
zCol2 = Xsqlite3_column_name(tls, *(*uintptr)(unsafe.Pointer(bp)), iCol+int32(1))
(*TRtree)(unsafe.Pointer(pRtree)).Fbase.FzErrMsg = Xsqlite3_mprintf(tls, __ccgo_ts+26154, libc.VaList(bp+16, (*TRtree)(unsafe.Pointer(pRtree)).FzName, zCol1, zCol2))
}
}
Xsqlite3_finalize(tls, *(*uintptr)(unsafe.Pointer(bp)))
if rc == SQLITE_OK {
v1 = int32(SQLITE_CONSTRAINT)
} else {
v1 = rc
}
return v1
} | func _rtreecheck(tls *libc.TLS, ctx uintptr, nArg int32, apArg uintptr) {
bp := tls.Alloc(16)
defer tls.Free(16)
var rc int32
var zDb, zTab, v1 uintptr
var _ /* zReport at bp+0 */ uintptr
_, _, _, _ = rc, zDb, zTab, v1
if nArg != int32(1) && nArg != int32(2) {
Xsqlite3_result_error(tls, ctx, __ccgo_ts+28381, -int32(1))
} else {
*(*uintptr)(unsafe.Pointer(bp)) = uintptr(0)
zDb = Xsqlite3_value_text(tls, *(*uintptr)(unsafe.Pointer(apArg)))
if nArg == int32(1) {
zTab = zDb
zDb = __ccgo_ts + 6713
} else {
zTab = Xsqlite3_value_text(tls, *(*uintptr)(unsafe.Pointer(apArg + 1*8)))
}
rc = _rtreeCheckTable(tls, Xsqlite3_context_db_handle(tls, ctx), zDb, zTab, bp)
if rc == SQLITE_OK {
if *(*uintptr)(unsafe.Pointer(bp)) != 0 {
v1 = *(*uintptr)(unsafe.Pointer(bp))
} else {
v1 = __ccgo_ts + 18615
}
Xsqlite3_result_text(tls, ctx, v1, -int32(1), uintptr(-libc.Int32FromInt32(1)))
} else {
Xsqlite3_result_error_code(tls, ctx, rc)
}
Xsqlite3_free(tls, *(*uintptr)(unsafe.Pointer(bp)))
}
} | 0.636003 | umputun/spot | vendor/modernc.org/sqlite/lib/sqlite_freebsd_amd64.go | umputun/spot | vendor/modernc.org/sqlite/lib/sqlite_freebsd_amd64.go | MIT | go |
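The rtreeConstraintError rows above distinguish two failure modes for an rtree insert: a duplicate id (the iCol == 0 branch) and a coordinate pair whose lower bound exceeds its upper bound. A minimal sketch that triggers both from Go, assuming the modernc.org/sqlite driver (the source of the translated rtree code in these rows) with the R*Tree module available in that build:

package main

import (
	"database/sql"
	"fmt"
	"log"

	_ "modernc.org/sqlite" // registers the "sqlite" driver; assumed to include the rtree module
)

func main() {
	db, err := sql.Open("sqlite", ":memory:")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// A 1-dimensional rtree: id plus one (min, max) coordinate pair.
	if _, err := db.Exec(`CREATE VIRTUAL TABLE demo_rt USING rtree(id, minX, maxX)`); err != nil {
		log.Fatal(err)
	}

	// minX > maxX violates the (c1<=c2) constraint, so this insert should fail
	// with an SQLITE_CONSTRAINT error naming the two columns involved.
	if _, err := db.Exec(`INSERT INTO demo_rt VALUES (1, 10.0, 5.0)`); err != nil {
		fmt.Println("coordinate constraint:", err)
	}

	// A duplicate id exercises the other branch: the unique constraint on id.
	if _, err := db.Exec(`INSERT INTO demo_rt VALUES (1, 0.0, 1.0)`); err != nil {
		log.Fatal(err)
	}
	if _, err := db.Exec(`INSERT INTO demo_rt VALUES (1, 2.0, 3.0)`); err != nil {
		fmt.Println("duplicate id:", err)
	}
}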
GetAttachmentWithContext is the same as GetAttachment with the addition of
the ability to pass a context and additional request options.
See GetAttachment for details on how to use this API operation.
The context must be non-nil and will be used for request cancellation. If
the context is nil a panic will occur. In the future the SDK may create
sub-contexts for http.Requests. See https://golang.org/pkg/context/
for more information on using Contexts. | func (c *ConnectParticipant) GetAttachmentWithContext(ctx aws.Context, input *GetAttachmentInput, opts ...request.Option) (*GetAttachmentOutput, error) {
req, out := c.GetAttachmentRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
} | func (c *NetworkManager) GetConnectAttachmentWithContext(ctx aws.Context, input *GetConnectAttachmentInput, opts ...request.Option) (*GetConnectAttachmentOutput, error) {
req, out := c.GetConnectAttachmentRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
} | 0.84896 | aws/aws-sdk-go | service/connectparticipant/api.go | aws/aws-sdk-go | service/networkmanager/api.go | Apache-2.0 | go |
GetAttachmentWithContext is the same as GetAttachment with the addition of
the ability to pass a context and additional request options.
See GetAttachment for details on how to use this API operation.
The context must be non-nil and will be used for request cancellation. If
the context is nil a panic will occur. In the future the SDK may create
sub-contexts for http.Requests. See https://golang.org/pkg/context/
for more information on using Contexts. | func (c *ConnectParticipant) GetAttachmentWithContext(ctx aws.Context, input *GetAttachmentInput, opts ...request.Option) (*GetAttachmentOutput, error) {
req, out := c.GetAttachmentRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
} | func (c *Support) DescribeAttachmentWithContext(ctx aws.Context, input *DescribeAttachmentInput, opts ...request.Option) (*DescribeAttachmentOutput, error) {
req, out := c.DescribeAttachmentRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
} | 0.83787 | aws/aws-sdk-go | service/connectparticipant/api.go | aws/aws-sdk-go | service/support/api.go | Apache-2.0 | go |
GetAttachmentWithContext is the same as GetAttachment with the addition of
the ability to pass a context and additional request options.
See GetAttachment for details on how to use this API operation.
The context must be non-nil and will be used for request cancellation. If
the context is nil a panic will occur. In the future the SDK may create
sub-contexts for http.Requests. See https://golang.org/pkg/context/
for more information on using Contexts. | func (c *ConnectParticipant) GetAttachmentWithContext(ctx aws.Context, input *GetAttachmentInput, opts ...request.Option) (*GetAttachmentOutput, error) {
req, out := c.GetAttachmentRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
} | func (c *NetworkManager) DeleteAttachmentWithContext(ctx aws.Context, input *DeleteAttachmentInput, opts ...request.Option) (*DeleteAttachmentOutput, error) {
req, out := c.DeleteAttachmentRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
} | 0.805849 | aws/aws-sdk-go | service/connectparticipant/api.go | aws/aws-sdk-go | service/networkmanager/api.go | Apache-2.0 | go |
GetAttachmentWithContext is the same as GetAttachment with the addition of
the ability to pass a context and additional request options.
See GetAttachment for details on how to use this API operation.
The context must be non-nil and will be used for request cancellation. If
the context is nil a panic will occur. In the future the SDK may create
sub-contexts for http.Requests. See https://golang.org/pkg/context/
for more information on using Contexts. | func (c *ConnectParticipant) GetAttachmentWithContext(ctx aws.Context, input *GetAttachmentInput, opts ...request.Option) (*GetAttachmentOutput, error) {
req, out := c.GetAttachmentRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
} | func (c *NetworkManager) AcceptAttachmentWithContext(ctx aws.Context, input *AcceptAttachmentInput, opts ...request.Option) (*AcceptAttachmentOutput, error) {
req, out := c.AcceptAttachmentRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
} | 0.754943 | aws/aws-sdk-go | service/connectparticipant/api.go | aws/aws-sdk-go | service/networkmanager/api.go | Apache-2.0 | go |
GetAttachmentWithContext is the same as GetAttachment with the addition of
the ability to pass a context and additional request options.
See GetAttachment for details on how to use this API operation.
The context must be non-nil and will be used for request cancellation. If
the context is nil a panic will occur. In the future the SDK may create
sub-contexts for http.Requests. See https://golang.org/pkg/context/
for more information on using Contexts. | func (c *ConnectParticipant) GetAttachmentWithContext(ctx aws.Context, input *GetAttachmentInput, opts ...request.Option) (*GetAttachmentOutput, error) {
req, out := c.GetAttachmentRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
} | func (c *NetworkManager) RejectAttachmentWithContext(ctx aws.Context, input *RejectAttachmentInput, opts ...request.Option) (*RejectAttachmentOutput, error) {
req, out := c.RejectAttachmentRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
} | 0.753094 | aws/aws-sdk-go | service/connectparticipant/api.go | aws/aws-sdk-go | service/networkmanager/api.go | Apache-2.0 | go |
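The GetAttachmentWithContext rows restate the SDK's WithContext contract: the context must be non-nil and is used to cancel the in-flight request. Below is a minimal sketch that honors the contract with a deadline; the attachment ID and connection token are placeholder values, not taken from these rows:

package main

import (
	"context"
	"fmt"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/connectparticipant"
)

func main() {
	sess := session.Must(session.NewSession())
	client := connectparticipant.New(sess)

	// A non-nil context with a deadline; cancelling it aborts the in-flight
	// HTTP request, as described in the docstring above.
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	out, err := client.GetAttachmentWithContext(ctx, &connectparticipant.GetAttachmentInput{
		AttachmentId:    aws.String("placeholder-attachment-id"),    // placeholder value
		ConnectionToken: aws.String("placeholder-connection-token"), // placeholder value
	})
	if err != nil {
		fmt.Println("get attachment failed:", err)
		return
	}
	fmt.Println("pre-signed download URL:", aws.StringValue(out.Url))
}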
getDescriptions loads OpenapiFiles for all the OpenAPI 3.0 description files in github/rest-api-description.
This assumes that all directories in "descriptions/" contain OpenAPI 3.0 description files with the same
name as the directory (plus the ".json" extension). For example, "descriptions/api.github.com/api.github.com.json".
Results are sorted by these rules:
- Directories that don't match any of the patterns in dirPatterns are removed.
- Directories are sorted by the pattern that matched in the same order they appear in dirPatterns.
- Directories are then sorted by major and minor version in descending order. | func getDescriptions(ctx context.Context, client *github.Client, gitRef string) ([]*openapiFile, error) {
_, dir, resp, err := client.Repositories.GetContents(
ctx,
descriptionsOwnerName,
descriptionsRepoName,
descriptionsPath,
&github.RepositoryContentGetOptions{Ref: gitRef},
)
if err != nil {
return nil, err
}
if resp.StatusCode != 200 {
return nil, fmt.Errorf("unexpected status code: %s", resp.Status)
}
files := make([]*openapiFile, 0, len(dir))
for _, d := range dir {
for i, pattern := range dirPatterns {
m := pattern.FindStringSubmatch(d.GetName())
if m == nil {
continue
}
file := openapiFile{
filename: fmt.Sprintf("descriptions/%s/%s.json", d.GetName(), d.GetName()),
plan: m[pattern.SubexpIndex("plan")],
planIdx: i,
}
rawMajor := m[pattern.SubexpIndex("major")]
if rawMajor != "" {
file.releaseMajor, err = strconv.Atoi(rawMajor)
if err != nil {
return nil, err
}
}
rawMinor := m[pattern.SubexpIndex("minor")]
if rawMinor != "" {
file.releaseMinor, err = strconv.Atoi(rawMinor)
if err != nil {
return nil, err
}
}
if file.plan == "ghes" && file.releaseMajor < 3 {
continue
}
files = append(files, &file)
break
}
}
sort.Slice(files, func(i, j int) bool {
return files[i].less(files[j])
})
g, ctx := errgroup.WithContext(ctx)
for _, file := range files {
f := file
g.Go(func() error {
return f.loadDescription(ctx, client, gitRef)
})
}
err = g.Wait()
if err != nil {
return nil, err
}
return files, nil
} | func (fl *FileSystemLoader) readOpenAPIFiles(openApiDir string) ([]byte /*configValuesBytes*/, []byte /*valuesBytes*/, error) {
return utils.ReadOpenAPIFiles(openApiDir)
} | 0.493254 | google/go-github | tools/metadata/openapi.go | flant/addon-operator | pkg/module_manager/loader/fs/fs.go | Apache-2.0 | go |
getDescriptions loads OpenapiFiles for all the OpenAPI 3.0 description files in github/rest-api-description.
This assumes that all directories in "descriptions/" contain OpenAPI 3.0 description files with the same
name as the directory (plus the ".json" extension). For example, "descriptions/api.github.com/api.github.com.json".
Results are sorted by these rules:
- Directories that don't match any of the patterns in dirPatterns are removed.
- Directories are sorted by the pattern that matched in the same order they appear in dirPatterns.
- Directories are then sorted by major and minor version in descending order. | func getDescriptions(ctx context.Context, client *github.Client, gitRef string) ([]*openapiFile, error) {
_, dir, resp, err := client.Repositories.GetContents(
ctx,
descriptionsOwnerName,
descriptionsRepoName,
descriptionsPath,
&github.RepositoryContentGetOptions{Ref: gitRef},
)
if err != nil {
return nil, err
}
if resp.StatusCode != 200 {
return nil, fmt.Errorf("unexpected status code: %s", resp.Status)
}
files := make([]*openapiFile, 0, len(dir))
for _, d := range dir {
for i, pattern := range dirPatterns {
m := pattern.FindStringSubmatch(d.GetName())
if m == nil {
continue
}
file := openapiFile{
filename: fmt.Sprintf("descriptions/%s/%s.json", d.GetName(), d.GetName()),
plan: m[pattern.SubexpIndex("plan")],
planIdx: i,
}
rawMajor := m[pattern.SubexpIndex("major")]
if rawMajor != "" {
file.releaseMajor, err = strconv.Atoi(rawMajor)
if err != nil {
return nil, err
}
}
rawMinor := m[pattern.SubexpIndex("minor")]
if rawMinor != "" {
file.releaseMinor, err = strconv.Atoi(rawMinor)
if err != nil {
return nil, err
}
}
if file.plan == "ghes" && file.releaseMajor < 3 {
continue
}
files = append(files, &file)
break
}
}
sort.Slice(files, func(i, j int) bool {
return files[i].less(files[j])
})
g, ctx := errgroup.WithContext(ctx)
for _, file := range files {
f := file
g.Go(func() error {
return f.loadDescription(ctx, client, gitRef)
})
}
err = g.Wait()
if err != nil {
return nil, err
}
return files, nil
} | func ReadOpenAPIFiles(openApiDir string) ([]byte /*configValuesBytes*/, []byte /*valuesBytes*/, error) {
if openApiDir == "" {
return nil, nil, nil
}
if _, err := os.Stat(openApiDir); os.IsNotExist(err) {
return nil, nil, nil
}
configValuesBytes := make([]byte, 0)
configPath := filepath.Join(openApiDir, ConfigValuesFileName)
if _, err := os.Stat(configPath); !os.IsNotExist(err) {
configValuesBytes, err = os.ReadFile(configPath)
if err != nil {
return nil, nil, fmt.Errorf("read file '%s': %w", configPath, err)
}
}
valuesBytes := make([]byte, 0)
valuesPath := filepath.Join(openApiDir, ValuesFileName)
if _, err := os.Stat(valuesPath); !os.IsNotExist(err) {
valuesBytes, err = os.ReadFile(valuesPath)
if err != nil {
return nil, nil, fmt.Errorf("read file '%s': %w", valuesPath, err)
}
}
return configValuesBytes, valuesBytes, nil
} | 0.477741 | google/go-github | tools/metadata/openapi.go | flant/addon-operator | pkg/utils/loader.go | Apache-2.0 | go |
getDescriptions loads OpenapiFiles for all the OpenAPI 3.0 description files in github/rest-api-description.
This assumes that all directories in "descriptions/" contain OpenAPI 3.0 description files with the same
name as the directory (plus the ".json" extension). For example, "descriptions/api.github.com/api.github.com.json".
Results are sorted by these rules:
- Directories that don't match any of the patterns in dirPatterns are removed.
- Directories are sorted by the pattern that matched in the same order they appear in dirPatterns.
- Directories are then sorted by major and minor version in descending order. | func getDescriptions(ctx context.Context, client *github.Client, gitRef string) ([]*openapiFile, error) {
_, dir, resp, err := client.Repositories.GetContents(
ctx,
descriptionsOwnerName,
descriptionsRepoName,
descriptionsPath,
&github.RepositoryContentGetOptions{Ref: gitRef},
)
if err != nil {
return nil, err
}
if resp.StatusCode != 200 {
return nil, fmt.Errorf("unexpected status code: %s", resp.Status)
}
files := make([]*openapiFile, 0, len(dir))
for _, d := range dir {
for i, pattern := range dirPatterns {
m := pattern.FindStringSubmatch(d.GetName())
if m == nil {
continue
}
file := openapiFile{
filename: fmt.Sprintf("descriptions/%s/%s.json", d.GetName(), d.GetName()),
plan: m[pattern.SubexpIndex("plan")],
planIdx: i,
}
rawMajor := m[pattern.SubexpIndex("major")]
if rawMajor != "" {
file.releaseMajor, err = strconv.Atoi(rawMajor)
if err != nil {
return nil, err
}
}
rawMinor := m[pattern.SubexpIndex("minor")]
if rawMinor != "" {
file.releaseMinor, err = strconv.Atoi(rawMinor)
if err != nil {
return nil, err
}
}
if file.plan == "ghes" && file.releaseMajor < 3 {
continue
}
files = append(files, &file)
break
}
}
sort.Slice(files, func(i, j int) bool {
return files[i].less(files[j])
})
g, ctx := errgroup.WithContext(ctx)
for _, file := range files {
f := file
g.Go(func() error {
return f.loadDescription(ctx, client, gitRef)
})
}
err = g.Wait()
if err != nil {
return nil, err
}
return files, nil
} | func GetSwagger() (swagger *openapi3.T, err error) {
resolvePath := PathToRawSpec("")
loader := openapi3.NewLoader()
loader.IsExternalRefsAllowed = true
loader.ReadFromURIFunc = func(loader *openapi3.Loader, url *url.URL) ([]byte, error) {
pathToFile := url.String()
pathToFile = path.Clean(pathToFile)
getSpec, ok := resolvePath[pathToFile]
if !ok {
err1 := fmt.Errorf("path not found: %s", pathToFile)
return nil, err1
}
return getSpec()
}
var specData []byte
specData, err = rawSpec()
if err != nil {
return
}
swagger, err = loader.LoadFromData(specData)
if err != nil {
return
}
return
} | 0.476343 | google/go-github | tools/metadata/openapi.go | openmeterio/openmeter | api/api.gen.go | Apache-2.0 | go |
getDescriptions loads OpenapiFiles for all the OpenAPI 3.0 description files in github/rest-api-description.
This assumes that all directories in "descriptions/" contain OpenAPI 3.0 description files with the same
name as the directory (plus the ".json" extension). For example, "descriptions/api.github.com/api.github.com.json".
Results are sorted by these rules:
- Directories that don't match any of the patterns in dirPatterns are removed.
- Directories are sorted by the pattern that matched in the same order they appear in dirPatterns.
- Directories are then sorted by major and minor version in descending order. | func getDescriptions(ctx context.Context, client *github.Client, gitRef string) ([]*openapiFile, error) {
_, dir, resp, err := client.Repositories.GetContents(
ctx,
descriptionsOwnerName,
descriptionsRepoName,
descriptionsPath,
&github.RepositoryContentGetOptions{Ref: gitRef},
)
if err != nil {
return nil, err
}
if resp.StatusCode != 200 {
return nil, fmt.Errorf("unexpected status code: %s", resp.Status)
}
files := make([]*openapiFile, 0, len(dir))
for _, d := range dir {
for i, pattern := range dirPatterns {
m := pattern.FindStringSubmatch(d.GetName())
if m == nil {
continue
}
file := openapiFile{
filename: fmt.Sprintf("descriptions/%s/%s.json", d.GetName(), d.GetName()),
plan: m[pattern.SubexpIndex("plan")],
planIdx: i,
}
rawMajor := m[pattern.SubexpIndex("major")]
if rawMajor != "" {
file.releaseMajor, err = strconv.Atoi(rawMajor)
if err != nil {
return nil, err
}
}
rawMinor := m[pattern.SubexpIndex("minor")]
if rawMinor != "" {
file.releaseMinor, err = strconv.Atoi(rawMinor)
if err != nil {
return nil, err
}
}
if file.plan == "ghes" && file.releaseMajor < 3 {
continue
}
files = append(files, &file)
break
}
}
sort.Slice(files, func(i, j int) bool {
return files[i].less(files[j])
})
g, ctx := errgroup.WithContext(ctx)
for _, file := range files {
f := file
g.Go(func() error {
return f.loadDescription(ctx, client, gitRef)
})
}
err = g.Wait()
if err != nil {
return nil, err
}
return files, nil
} | func ReadPatterns(fs billy.Filesystem, path []string) (ps []gitignore.Pattern, err error) {
ps, _ = readIgnoreFile(fs, path, infoExcludeFile)
subps, err := ReadPatternsIgnoringDirs(fs, path, ps)
ps = append(ps, subps...)
return ps, err
} | 0.441767 | google/go-github | tools/metadata/openapi.go | google/osv-scanner | internal/customgitignore/dir.go | Apache-2.0 | go |
getDescriptions loads OpenapiFiles for all the OpenAPI 3.0 description files in github/rest-api-description.
This assumes that all directories in "descriptions/" contain OpenAPI 3.0 description files with the same
name as the directory (plus the ".json" extension). For example, "descriptions/api.github.com/api.github.com.json".
Results are sorted by these rules:
- Directories that don't match any of the patterns in dirPatterns are removed.
- Directories are sorted by the pattern that matched in the same order they appear in dirPatterns.
- Directories are then sorted by major and minor version in descending order. | func getDescriptions(ctx context.Context, client *github.Client, gitRef string) ([]*openapiFile, error) {
_, dir, resp, err := client.Repositories.GetContents(
ctx,
descriptionsOwnerName,
descriptionsRepoName,
descriptionsPath,
&github.RepositoryContentGetOptions{Ref: gitRef},
)
if err != nil {
return nil, err
}
if resp.StatusCode != 200 {
return nil, fmt.Errorf("unexpected status code: %s", resp.Status)
}
files := make([]*openapiFile, 0, len(dir))
for _, d := range dir {
for i, pattern := range dirPatterns {
m := pattern.FindStringSubmatch(d.GetName())
if m == nil {
continue
}
file := openapiFile{
filename: fmt.Sprintf("descriptions/%s/%s.json", d.GetName(), d.GetName()),
plan: m[pattern.SubexpIndex("plan")],
planIdx: i,
}
rawMajor := m[pattern.SubexpIndex("major")]
if rawMajor != "" {
file.releaseMajor, err = strconv.Atoi(rawMajor)
if err != nil {
return nil, err
}
}
rawMinor := m[pattern.SubexpIndex("minor")]
if rawMinor != "" {
file.releaseMinor, err = strconv.Atoi(rawMinor)
if err != nil {
return nil, err
}
}
if file.plan == "ghes" && file.releaseMajor < 3 {
continue
}
files = append(files, &file)
break
}
}
sort.Slice(files, func(i, j int) bool {
return files[i].less(files[j])
})
g, ctx := errgroup.WithContext(ctx)
for _, file := range files {
f := file
g.Go(func() error {
return f.loadDescription(ctx, client, gitRef)
})
}
err = g.Wait()
if err != nil {
return nil, err
}
return files, nil
} | func findJsonFiles(files []string) []string {
out := []string{}
recurse := true // @TODO potentially in the future we may make this an argument / flag
for _, file := range files {
info, err := os.Stat(file)
if !os.IsNotExist(err) {
if !info.IsDir() {
if filepath.Ext(file) == ".json" {
out = append(out, file)
}
} else {
if recurse {
re_err := filepath.Walk(file, func(path string, re_info os.FileInfo, err error) error {
if !re_info.IsDir() && filepath.Ext(path) == ".json" {
out = append(out, path)
}
return nil
})
if re_err != nil {
panic(re_err) // @TODO - handle this error better
}
}
}
}
}
return out
} | 0.426173 | google/go-github | tools/metadata/openapi.go | threatcl/threatcl | cmd/threatcl/util.go | MIT | go |
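getDescriptions defers the actual ordering to openapiFile.less, which is not included in these rows. The sketch below is a hypothetical comparison that only encodes the documented rules (pattern order first, then major and minor release in descending order) and may differ from the real method:

package main

import (
	"fmt"
	"sort"
)

// Hypothetical stand-in for the openapiFile fields that drive the ordering;
// the real less method is not shown here, so this only illustrates the
// documented rules.
type apiDescription struct {
	name         string
	planIdx      int // index of the matching pattern in dirPatterns
	releaseMajor int
	releaseMinor int
}

func (f apiDescription) less(o apiDescription) bool {
	if f.planIdx != o.planIdx {
		return f.planIdx < o.planIdx // earlier dirPatterns entries sort first
	}
	if f.releaseMajor != o.releaseMajor {
		return f.releaseMajor > o.releaseMajor // newer major releases first
	}
	return f.releaseMinor > o.releaseMinor // then newer minor releases first
}

func main() {
	files := []apiDescription{
		{"ghes-3.9", 2, 3, 9},
		{"api.github.com", 0, 0, 0},
		{"ghes-3.10", 2, 3, 10},
		{"ghec", 1, 0, 0},
	}
	sort.Slice(files, func(i, j int) bool { return files[i].less(files[j]) })
	for _, f := range files {
		fmt.Println(f.name) // api.github.com, ghec, ghes-3.10, ghes-3.9
	}
}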
UpdateResourcePolicyWithContext is the same as UpdateResourcePolicy with the addition of
the ability to pass a context and additional request options.
See UpdateResourcePolicy for details on how to use this API operation.
The context must be non-nil and will be used for request cancellation. If
the context is nil a panic will occur. In the future the SDK may create
sub-contexts for http.Requests. See https://golang.org/pkg/context/
for more information on using Contexts. | func (c *LexModelsV2) UpdateResourcePolicyWithContext(ctx aws.Context, input *UpdateResourcePolicyInput, opts ...request.Option) (*UpdateResourcePolicyOutput, error) {
req, out := c.UpdateResourcePolicyRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
} | func (c *DynamoDB) PutResourcePolicyWithContext(ctx aws.Context, input *PutResourcePolicyInput, opts ...request.Option) (*PutResourcePolicyOutput, error) {
req, out := c.PutResourcePolicyRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
} | 0.86838 | aws/aws-sdk-go | service/lexmodelsv2/api.go | tektoncd/cli | vendor/github.com/aws/aws-sdk-go/service/dynamodb/api.go | Apache-2.0 | go |
UpdateResourcePolicyWithContext is the same as UpdateResourcePolicy with the addition of
the ability to pass a context and additional request options.
See UpdateResourcePolicy for details on how to use this API operation.
The context must be non-nil and will be used for request cancellation. If
the context is nil a panic will occur. In the future the SDK may create
sub-contexts for http.Requests. See https://golang.org/pkg/context/
for more information on using Contexts. | func (c *LexModelsV2) UpdateResourcePolicyWithContext(ctx aws.Context, input *UpdateResourcePolicyInput, opts ...request.Option) (*UpdateResourcePolicyOutput, error) {
req, out := c.UpdateResourcePolicyRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
} | func (c *DynamoDB) DeleteResourcePolicyWithContext(ctx aws.Context, input *DeleteResourcePolicyInput, opts ...request.Option) (*DeleteResourcePolicyOutput, error) {
req, out := c.DeleteResourcePolicyRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
} | 0.853952 | aws/aws-sdk-go | service/lexmodelsv2/api.go | tektoncd/cli | vendor/github.com/aws/aws-sdk-go/service/dynamodb/api.go | Apache-2.0 | go |
UpdateResourcePolicyWithContext is the same as UpdateResourcePolicy with the addition of
the ability to pass a context and additional request options.
See UpdateResourcePolicy for details on how to use this API operation.
The context must be non-nil and will be used for request cancellation. If
the context is nil a panic will occur. In the future the SDK may create
sub-contexts for http.Requests. See https://golang.org/pkg/context/
for more information on using Contexts. | func (c *LexModelsV2) UpdateResourcePolicyWithContext(ctx aws.Context, input *UpdateResourcePolicyInput, opts ...request.Option) (*UpdateResourcePolicyOutput, error) {
req, out := c.UpdateResourcePolicyRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
} | func (c *VerifiedPermissions) UpdatePolicyWithContext(ctx aws.Context, input *UpdatePolicyInput, opts ...request.Option) (*UpdatePolicyOutput, error) {
req, out := c.UpdatePolicyRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
} | 0.845899 | aws/aws-sdk-go | service/lexmodelsv2/api.go | aws/aws-sdk-go | service/verifiedpermissions/api.go | Apache-2.0 | go |
UpdateResourcePolicyWithContext is the same as UpdateResourcePolicy with the addition of
the ability to pass a context and additional request options.
See UpdateResourcePolicy for details on how to use this API operation.
The context must be non-nil and will be used for request cancellation. If
the context is nil a panic will occur. In the future the SDK may create
sub-contexts for http.Requests. See https://golang.org/pkg/context/
for more information on using Contexts. | func (c *LexModelsV2) UpdateResourcePolicyWithContext(ctx aws.Context, input *UpdateResourcePolicyInput, opts ...request.Option) (*UpdateResourcePolicyOutput, error) {
req, out := c.UpdateResourcePolicyRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
} | func (c *SecretsManager) ValidateResourcePolicyWithContext(ctx aws.Context, input *ValidateResourcePolicyInput, opts ...request.Option) (*ValidateResourcePolicyOutput, error) {
req, out := c.ValidateResourcePolicyRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
} | 0.841959 | aws/aws-sdk-go | service/lexmodelsv2/api.go | aws/aws-sdk-go | service/secretsmanager/api.go | Apache-2.0 | go |
UpdateResourcePolicyWithContext is the same as UpdateResourcePolicy with the addition of
the ability to pass a context and additional request options.
See UpdateResourcePolicy for details on how to use this API operation.
The context must be non-nil and will be used for request cancellation. If
the context is nil a panic will occur. In the future the SDK may create
sub-contexts for http.Requests. See https://golang.org/pkg/context/
for more information on using Contexts. | func (c *LexModelsV2) UpdateResourcePolicyWithContext(ctx aws.Context, input *UpdateResourcePolicyInput, opts ...request.Option) (*UpdateResourcePolicyOutput, error) {
req, out := c.UpdateResourcePolicyRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
} | func (c *LexModelsV2) CreateResourcePolicyWithContext(ctx aws.Context, input *CreateResourcePolicyInput, opts ...request.Option) (*CreateResourcePolicyOutput, error) {
req, out := c.CreateResourcePolicyRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
} | 0.81915 | aws/aws-sdk-go | service/lexmodelsv2/api.go | aws/aws-sdk-go | service/lexmodelsv2/api.go | Apache-2.0 | go |
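Besides the context, the WithContext variants above accept additional request.Option values. A minimal sketch that passes one of the SDK's built-in options, request.WithLogLevel, to turn on wire-level logging for a single call; the resource ARN and policy document are placeholders, not values from these rows:

package main

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/request"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/lexmodelsv2"
)

func main() {
	sess := session.Must(session.NewSession())
	client := lexmodelsv2.New(sess)

	// The variadic opts apply to this one request only; here the SDK's
	// built-in debug-logging option is used.
	out, err := client.UpdateResourcePolicyWithContext(
		context.Background(),
		&lexmodelsv2.UpdateResourcePolicyInput{
			ResourceArn: aws.String("arn:aws:lex:us-east-1:123456789012:bot/PLACEHOLDER"), // placeholder
			Policy:      aws.String(`{"Version":"2012-10-17","Statement":[]}`),            // placeholder
		},
		request.WithLogLevel(aws.LogDebugWithHTTPBody),
	)
	if err != nil {
		fmt.Println("update failed:", err)
		return
	}
	fmt.Println(out)
}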
UntagResourceWithContext is the same as UntagResource with the addition of
the ability to pass a context and additional request options.
See UntagResource for details on how to use this API operation.
The context must be non-nil and will be used for request cancellation. If
the context is nil a panic will occur. In the future the SDK may create
sub-contexts for http.Requests. See https://golang.org/pkg/context/
for more information on using Contexts.
Deprecated: since: 2022-08-30 | func (c *IoTThingsGraph) UntagResourceWithContext(ctx aws.Context, input *UntagResourceInput, opts ...request.Option) (*UntagResourceOutput, error) {
req, out := c.UntagResourceRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
} | func (c *DynamoDB) UntagResourceWithContext(ctx aws.Context, input *UntagResourceInput, opts ...request.Option) (*UntagResourceOutput, error) {
req, out := c.UntagResourceRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
} | 0.977121 | aws/aws-sdk-go | service/iotthingsgraph/api.go | tektoncd/cli | vendor/github.com/aws/aws-sdk-go/service/dynamodb/api.go | Apache-2.0 | go |
UntagResourceWithContext is the same as UntagResource with the addition of
the ability to pass a context and additional request options.
See UntagResource for details on how to use this API operation.
The context must be non-nil and will be used for request cancellation. If
the context is nil a panic will occur. In the future the SDK may create
sub-contexts for http.Requests. See https://golang.org/pkg/context/
for more information on using Contexts.
Deprecated: since: 2022-08-30 | func (c *IoTThingsGraph) UntagResourceWithContext(ctx aws.Context, input *UntagResourceInput, opts ...request.Option) (*UntagResourceOutput, error) {
req, out := c.UntagResourceRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
} | func (c *Chime) UntagResourceWithContext(ctx aws.Context, input *UntagResourceInput, opts ...request.Option) (*UntagResourceOutput, error) {
req, out := c.UntagResourceRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
} | 0.971012 | aws/aws-sdk-go | service/iotthingsgraph/api.go | aws/aws-sdk-go | service/chime/api.go | Apache-2.0 | go |
UntagResourceWithContext is the same as UntagResource with the addition of
the ability to pass a context and additional request options.
See UntagResource for details on how to use this API operation.
The context must be non-nil and will be used for request cancellation. If
the context is nil a panic will occur. In the future the SDK may create
sub-contexts for http.Requests. See https://golang.org/pkg/context/
for more information on using Contexts.
Deprecated: since: 2022-08-30 | func (c *IoTThingsGraph) UntagResourceWithContext(ctx aws.Context, input *UntagResourceInput, opts ...request.Option) (*UntagResourceOutput, error) {
req, out := c.UntagResourceRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
} | func (c *WorkLink) UntagResourceWithContext(ctx aws.Context, input *UntagResourceInput, opts ...request.Option) (*UntagResourceOutput, error) {
req, out := c.UntagResourceRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
} | 0.957796 | aws/aws-sdk-go | service/iotthingsgraph/api.go | aws/aws-sdk-go | service/worklink/api.go | Apache-2.0 | go |
UntagResourceWithContext is the same as UntagResource with the addition of
the ability to pass a context and additional request options.
See UntagResource for details on how to use this API operation.
The context must be non-nil and will be used for request cancellation. If
the context is nil a panic will occur. In the future the SDK may create
sub-contexts for http.Requests. See https://golang.org/pkg/context/
for more information on using Contexts.
Deprecated: since: 2022-08-30 | func (c *IoTThingsGraph) UntagResourceWithContext(ctx aws.Context, input *UntagResourceInput, opts ...request.Option) (*UntagResourceOutput, error) {
req, out := c.UntagResourceRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
} | func (c *ResourceGroups) UntagWithContext(ctx aws.Context, input *UntagInput, opts ...request.Option) (*UntagOutput, error) {
req, out := c.UntagRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
} | 0.929436 | aws/aws-sdk-go | service/iotthingsgraph/api.go | aws/aws-sdk-go | service/resourcegroups/api.go | Apache-2.0 | go |
UntagResourceWithContext is the same as UntagResource with the addition of
the ability to pass a context and additional request options.
See UntagResource for details on how to use this API operation.
The context must be non-nil and will be used for request cancellation. If
the context is nil a panic will occur. In the future the SDK may create
sub-contexts for http.Requests. See https://golang.org/pkg/context/
for more information on using Contexts.
Deprecated: since: 2022-08-30 | func (c *IoTThingsGraph) UntagResourceWithContext(ctx aws.Context, input *UntagResourceInput, opts ...request.Option) (*UntagResourceOutput, error) {
req, out := c.UntagResourceRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
} | func (c *ResourceGroupsTaggingAPI) UntagResourcesWithContext(ctx aws.Context, input *UntagResourcesInput, opts ...request.Option) (*UntagResourcesOutput, error) {
req, out := c.UntagResourcesRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
} | 0.923916 | aws/aws-sdk-go | service/iotthingsgraph/api.go | aws/aws-sdk-go | service/resourcegroupstaggingapi/api.go | Apache-2.0 | go |
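The *WithContext wrappers in the preceding records all share one calling convention: the caller passes a non-nil context that the SDK uses to cancel the in-flight HTTP request. A minimal, illustrative sketch of that convention — reusing the DynamoDB UntagResourceWithContext from the hard-negative column above, with the region, ARN, and tag key as placeholder assumptions:

```go
package main

import (
	"context"
	"fmt"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/dynamodb"
)

func main() {
	// Credentials and region come from the default chain; us-east-1 is an assumption.
	sess := session.Must(session.NewSession(aws.NewConfig().WithRegion("us-east-1")))
	svc := dynamodb.New(sess)

	// The context must be non-nil; if it expires, the SDK aborts the request.
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	_, err := svc.UntagResourceWithContext(ctx, &dynamodb.UntagResourceInput{
		ResourceArn: aws.String("arn:aws:dynamodb:us-east-1:123456789012:table/example"), // placeholder ARN
		TagKeys:     []*string{aws.String("environment")},                                // placeholder tag key
	})
	if err != nil {
		fmt.Println("UntagResource failed:", err)
	}
}
```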
Cause returns nil for MultiError; there is no unique underlying cause in the
general case.
Note: we could conceivably return a non-nil Cause only when there is exactly
one child error with a Cause. However, it would be brittle for client code
to rely on the arity of causes inside a MultiError, so we have opted not to
provide this functionality. Clients which really wish to access the Causes
of the underlying errors are free to iterate through the errors themselves. | func (m MultiError) Cause() error { return nil } | func Cause(err error) error {
type causer interface {
Cause() error
}
for err != nil {
cause, ok := err.(causer)
if !ok {
break
}
err = cause.Cause()
}
return err
} | 0.73806 | kabukky/journey | vendor/github.com/gorilla/securecookie/securecookie.go | k8snetworkplumbingwg/multus-cni | vendor/github.com/pkg/errors/errors.go | Apache-2.0 | go |
Cause returns nil for MultiError; there is no unique underlying cause in the
general case.
Note: we could conceivably return a non-nil Cause only when there is exactly
one child error with a Cause. However, it would be brittle for client code
to rely on the arity of causes inside a MultiError, so we have opted not to
provide this functionality. Clients which really wish to access the Causes
of the underlying errors are free to iterate through the errors themselves. | func (m MultiError) Cause() error { return nil } | func (e *Err) Cause() error {
return e.cause
} | 0.68812 | kabukky/journey | vendor/github.com/gorilla/securecookie/securecookie.go | cloudbase/garm | vendor/github.com/juju/errors/error.go | Apache-2.0 | go |
Cause returns nil for MultiError; there is no unique underlying cause in the
general case.
Note: we could conceivably return a non-nil Cause only when there is exactly
one child error with a Cause. However, it would be brittle for client code
to rely on the arity of causes inside a MultiError, so we have opted not to
provide this functionality. Clients which really wish to access the Causes
of the underlying errors are free to iterate through the errors themselves. | func (m MultiError) Cause() error { return nil } | func Cause(err error) (cause error) {
cause = err
const maxDepth = 100
for i := 0; i <= maxDepth; i++ {
res := errors.Unwrap(cause)
if res == nil {
return cause
}
cause = res
}
logrus.Errorf("Max error depth of %d reached, cannot unwrap until root cause: %v", maxDepth, err)
return cause
} | 0.579597 | kabukky/journey | vendor/github.com/gorilla/securecookie/securecookie.go | containers/podman-tui | vendor/github.com/containers/podman/v5/pkg/errorhandling/errorhandling.go | Apache-2.0 | go |
Cause returns nil for MultiError; there is no unique underlying cause in the
general case.
Note: we could conceivably return a non-nil Cause only when there is exactly
one child error with a Cause. However, it would be brittle for client code
to rely on the arity of causes inside a MultiError, so we have opted not to
provide this functionality. Clients which really wish to access the Causes
of the underlying errors are free to iterate through the errors themselves. | func (m MultiError) Cause() error { return nil } | func Cause(err error) error {
var diag error
if err, ok := err.(causer); ok {
diag = err.Cause()
}
if diag != nil {
return diag
}
return err
} | 0.57695 | kabukky/journey | vendor/github.com/gorilla/securecookie/securecookie.go | cloudbase/garm | vendor/github.com/juju/errors/functions.go | Apache-2.0 | go |
Cause returns nil for MultiError; there is no unique underlying cause in the
general case.
Note: we could conceivably return a non-nil Cause only when there is exactly
one child error with a Cause. However, it would be brittle for client code
to rely on the arity of causes inside a MultiError, so we have opted not to
provide this functionality. Clients which really wish to access the Causes
of the underlying errors are free to iterate through the errors themselves. | func (m MultiError) Cause() error { return nil } | func (cr *checkResult) error() error {
if len(cr.errors) == 0 {
return nil
}
return fmt.Errorf("cert failed constraints check: %+q", cr.errors)
} | 0.573877 | kabukky/journey | vendor/github.com/gorilla/securecookie/securecookie.go | moby/buildkit | vendor/github.com/in-toto/in-toto-golang/in_toto/certconstraint.go | Apache-2.0 | go |
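The Cause docstring above leans on the informal causer convention — an error that exposes the error it wraps through a Cause() method — and the hard-negative implementations simply iterate over that method. A self-contained sketch of the convention, with a hypothetical wrapper type standing in for a real wrapping library:

```go
package main

import (
	"errors"
	"fmt"
)

// wrapped is a hypothetical error type that follows the causer convention:
// it exposes the error it wraps through a Cause() method.
type wrapped struct {
	msg   string
	cause error
}

func (w *wrapped) Error() string { return w.msg + ": " + w.cause.Error() }
func (w *wrapped) Cause() error  { return w.cause }

// rootCause walks Cause() links until no further cause is exposed, mirroring
// the iteration in the implementations above; a nil Cause (as MultiError
// returns) simply stops the walk.
func rootCause(err error) error {
	type causer interface{ Cause() error }
	for err != nil {
		c, ok := err.(causer)
		if !ok || c.Cause() == nil {
			break
		}
		err = c.Cause()
	}
	return err
}

func main() {
	base := errors.New("disk full")
	err := &wrapped{msg: "save failed", cause: &wrapped{msg: "write failed", cause: base}}
	fmt.Println(rootCause(err)) // prints: disk full
}
```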
DescribeDBProxiesPages iterates over the pages of a DescribeDBProxies operation,
calling the "fn" function with the response data for each page. To stop
iterating, return false from the fn function.
See DescribeDBProxies method for more information on how to use this operation.
Note: This operation can generate multiple requests to a service.
// Example iterating over at most 3 pages of a DescribeDBProxies operation.
pageNum := 0
err := client.DescribeDBProxiesPages(params,
func(page *rds.DescribeDBProxiesOutput, lastPage bool) bool {
pageNum++
fmt.Println(page)
return pageNum <= 3
}) | func (c *RDS) DescribeDBProxiesPages(input *DescribeDBProxiesInput, fn func(*DescribeDBProxiesOutput, bool) bool) error {
return c.DescribeDBProxiesPagesWithContext(aws.BackgroundContext(), input, fn)
} | func (c *RDS) DescribeDBProxyTargetsPages(input *DescribeDBProxyTargetsInput, fn func(*DescribeDBProxyTargetsOutput, bool) bool) error {
return c.DescribeDBProxyTargetsPagesWithContext(aws.BackgroundContext(), input, fn)
} | 0.853674 | aws/aws-sdk-go | service/rds/api.go | aws/aws-sdk-go | service/rds/api.go | Apache-2.0 | go |
DescribeDBProxiesPages iterates over the pages of a DescribeDBProxies operation,
calling the "fn" function with the response data for each page. To stop
iterating, return false from the fn function.
See DescribeDBProxies method for more information on how to use this operation.
Note: This operation can generate multiple requests to a service.
// Example iterating over at most 3 pages of a DescribeDBProxies operation.
pageNum := 0
err := client.DescribeDBProxiesPages(params,
func(page *rds.DescribeDBProxiesOutput, lastPage bool) bool {
pageNum++
fmt.Println(page)
return pageNum <= 3
}) | func (c *RDS) DescribeDBProxiesPages(input *DescribeDBProxiesInput, fn func(*DescribeDBProxiesOutput, bool) bool) error {
return c.DescribeDBProxiesPagesWithContext(aws.BackgroundContext(), input, fn)
} | func (c *RDS) DescribeDBProxyTargetGroupsPages(input *DescribeDBProxyTargetGroupsInput, fn func(*DescribeDBProxyTargetGroupsOutput, bool) bool) error {
return c.DescribeDBProxyTargetGroupsPagesWithContext(aws.BackgroundContext(), input, fn)
} | 0.826153 | aws/aws-sdk-go | service/rds/api.go | aws/aws-sdk-go | service/rds/api.go | Apache-2.0 | go |
DescribeDBProxiesPages iterates over the pages of a DescribeDBProxies operation,
calling the "fn" function with the response data for each page. To stop
iterating, return false from the fn function.
See DescribeDBProxies method for more information on how to use this operation.
Note: This operation can generate multiple requests to a service.
// Example iterating over at most 3 pages of a DescribeDBProxies operation.
pageNum := 0
err := client.DescribeDBProxiesPages(params,
func(page *rds.DescribeDBProxiesOutput, lastPage bool) bool {
pageNum++
fmt.Println(page)
return pageNum <= 3
}) | func (c *RDS) DescribeDBProxiesPages(input *DescribeDBProxiesInput, fn func(*DescribeDBProxiesOutput, bool) bool) error {
return c.DescribeDBProxiesPagesWithContext(aws.BackgroundContext(), input, fn)
} | func (c *RDS) DescribeDBProxiesRequest(input *DescribeDBProxiesInput) (req *request.Request, output *DescribeDBProxiesOutput) {
op := &request.Operation{
Name: opDescribeDBProxies,
HTTPMethod: "POST",
HTTPPath: "/",
Paginator: &request.Paginator{
InputTokens: []string{"Marker"},
OutputTokens: []string{"Marker"},
LimitToken: "MaxRecords",
TruncationToken: "",
},
}
if input == nil {
input = &DescribeDBProxiesInput{}
}
output = &DescribeDBProxiesOutput{}
req = c.newRequest(op, input, output)
return
} | 0.82202 | aws/aws-sdk-go | service/rds/api.go | aws/aws-sdk-go | service/rds/api.go | Apache-2.0 | go |
DescribeDBProxiesPages iterates over the pages of a DescribeDBProxies operation,
calling the "fn" function with the response data for each page. To stop
iterating, return false from the fn function.
See DescribeDBProxies method for more information on how to use this operation.
Note: This operation can generate multiple requests to a service.
// Example iterating over at most 3 pages of a DescribeDBProxies operation.
pageNum := 0
err := client.DescribeDBProxiesPages(params,
func(page *rds.DescribeDBProxiesOutput, lastPage bool) bool {
pageNum++
fmt.Println(page)
return pageNum <= 3
}) | func (c *RDS) DescribeDBProxiesPages(input *DescribeDBProxiesInput, fn func(*DescribeDBProxiesOutput, bool) bool) error {
return c.DescribeDBProxiesPagesWithContext(aws.BackgroundContext(), input, fn)
} | func (c *RDS) DescribeDBProxiesPagesWithContext(ctx aws.Context, input *DescribeDBProxiesInput, fn func(*DescribeDBProxiesOutput, bool) bool, opts ...request.Option) error {
p := request.Pagination{
NewRequest: func() (*request.Request, error) {
var inCpy *DescribeDBProxiesInput
if input != nil {
tmp := *input
inCpy = &tmp
}
req, _ := c.DescribeDBProxiesRequest(inCpy)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return req, nil
},
}
for p.Next() {
if !fn(p.Page().(*DescribeDBProxiesOutput), !p.HasNextPage()) {
break
}
}
return p.Err()
} | 0.818901 | aws/aws-sdk-go | service/rds/api.go | aws/aws-sdk-go | service/rds/api.go | Apache-2.0 | go |
DescribeDBProxiesPages iterates over the pages of a DescribeDBProxies operation,
calling the "fn" function with the response data for each page. To stop
iterating, return false from the fn function.
See DescribeDBProxies method for more information on how to use this operation.
Note: This operation can generate multiple requests to a service.
// Example iterating over at most 3 pages of a DescribeDBProxies operation.
pageNum := 0
err := client.DescribeDBProxiesPages(params,
func(page *rds.DescribeDBProxiesOutput, lastPage bool) bool {
pageNum++
fmt.Println(page)
return pageNum <= 3
}) | func (c *RDS) DescribeDBProxiesPages(input *DescribeDBProxiesInput, fn func(*DescribeDBProxiesOutput, bool) bool) error {
return c.DescribeDBProxiesPagesWithContext(aws.BackgroundContext(), input, fn)
} | func (c *RDS) DescribeDBProxyEndpointsPages(input *DescribeDBProxyEndpointsInput, fn func(*DescribeDBProxyEndpointsOutput, bool) bool) error {
return c.DescribeDBProxyEndpointsPagesWithContext(aws.BackgroundContext(), input, fn)
} | 0.809777 | aws/aws-sdk-go | service/rds/api.go | aws/aws-sdk-go | service/rds/api.go | Apache-2.0 | go |
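The Pages helpers above hand each page of results to a callback and keep fetching while it returns true. A sketch of driving DescribeDBProxiesPages that way — assuming default credentials and that the page output exposes a DBProxies slice whose entries carry a DBProxyName field:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/rds"
)

func main() {
	sess := session.Must(session.NewSession(aws.NewConfig().WithRegion("us-east-1")))
	svc := rds.New(sess)

	// The callback runs once per page; returning true requests the next page.
	var names []string
	err := svc.DescribeDBProxiesPages(&rds.DescribeDBProxiesInput{},
		func(page *rds.DescribeDBProxiesOutput, lastPage bool) bool {
			for _, proxy := range page.DBProxies {
				names = append(names, aws.StringValue(proxy.DBProxyName))
			}
			return true // keep paginating until lastPage
		})
	if err != nil {
		fmt.Println("DescribeDBProxies failed:", err)
		return
	}
	fmt.Println("proxies:", names)
}
```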
ListTagsForResourceRequest generates a "aws/request.Request" representing the
client's request for the ListTagsForResource operation. The "output" return
value will be populated with the request's response once the request completes
successfully.
Use "Send" method on the returned Request to send the API call to the service.
the "output" return value is not valid until after Send returns without error.
See ListTagsForResource for more information on using the ListTagsForResource
API call, and error handling.
This method is useful when you want to inject custom logic or configuration
into the SDK's request lifecycle. Such as custom headers, or retry logic.
// Example sending a request using the ListTagsForResourceRequest method.
req, resp := client.ListTagsForResourceRequest(params)
err := req.Send()
if err == nil { // resp is now filled
fmt.Println(resp)
}
See also, https://docs.aws.amazon.com/goto/WebAPI/finspace-2021-03-12/ListTagsForResource | func (c *Finspace) ListTagsForResourceRequest(input *ListTagsForResourceInput) (req *request.Request, output *ListTagsForResourceOutput) {
op := &request.Operation{
Name: opListTagsForResource,
HTTPMethod: "GET",
HTTPPath: "/tags/{resourceArn}",
}
if input == nil {
input = &ListTagsForResourceInput{}
}
output = &ListTagsForResourceOutput{}
req = c.newRequest(op, input, output)
return
} | func (c *Repostspace) ListTagsForResourceRequest(input *ListTagsForResourceInput) (req *request.Request, output *ListTagsForResourceOutput) {
op := &request.Operation{
Name: opListTagsForResource,
HTTPMethod: "GET",
HTTPPath: "/tags/{resourceArn}",
}
if input == nil {
input = &ListTagsForResourceInput{}
}
output = &ListTagsForResourceOutput{}
req = c.newRequest(op, input, output)
return
} | 0.998147 | aws/aws-sdk-go | service/finspace/api.go | aws/aws-sdk-go | service/repostspace/api.go | Apache-2.0 | go |
ListTagsForResourceRequest generates a "aws/request.Request" representing the
client's request for the ListTagsForResource operation. The "output" return
value will be populated with the request's response once the request completes
successfully.
Use "Send" method on the returned Request to send the API call to the service.
the "output" return value is not valid until after Send returns without error.
See ListTagsForResource for more information on using the ListTagsForResource
API call, and error handling.
This method is useful when you want to inject custom logic or configuration
into the SDK's request lifecycle. Such as custom headers, or retry logic.
// Example sending a request using the ListTagsForResourceRequest method.
req, resp := client.ListTagsForResourceRequest(params)
err := req.Send()
if err == nil { // resp is now filled
fmt.Println(resp)
}
See also, https://docs.aws.amazon.com/goto/WebAPI/finspace-2021-03-12/ListTagsForResource | func (c *Finspace) ListTagsForResourceRequest(input *ListTagsForResourceInput) (req *request.Request, output *ListTagsForResourceOutput) {
op := &request.Operation{
Name: opListTagsForResource,
HTTPMethod: "GET",
HTTPPath: "/tags/{resourceArn}",
}
if input == nil {
input = &ListTagsForResourceInput{}
}
output = &ListTagsForResourceOutput{}
req = c.newRequest(op, input, output)
return
} | func (c *NimbleStudio) ListTagsForResourceRequest(input *ListTagsForResourceInput) (req *request.Request, output *ListTagsForResourceOutput) {
op := &request.Operation{
Name: opListTagsForResource,
HTTPMethod: "GET",
HTTPPath: "/2020-08-01/tags/{resourceArn}",
}
if input == nil {
input = &ListTagsForResourceInput{}
}
output = &ListTagsForResourceOutput{}
req = c.newRequest(op, input, output)
return
} | 0.997672 | aws/aws-sdk-go | service/finspace/api.go | aws/aws-sdk-go | service/nimblestudio/api.go | Apache-2.0 | go |
ListTagsForResourceRequest generates a "aws/request.Request" representing the
client's request for the ListTagsForResource operation. The "output" return
value will be populated with the request's response once the request completes
successfully.
Use "Send" method on the returned Request to send the API call to the service.
the "output" return value is not valid until after Send returns without error.
See ListTagsForResource for more information on using the ListTagsForResource
API call, and error handling.
This method is useful when you want to inject custom logic or configuration
into the SDK's request lifecycle. Such as custom headers, or retry logic.
// Example sending a request using the ListTagsForResourceRequest method.
req, resp := client.ListTagsForResourceRequest(params)
err := req.Send()
if err == nil { // resp is now filled
fmt.Println(resp)
}
See also, https://docs.aws.amazon.com/goto/WebAPI/finspace-2021-03-12/ListTagsForResource | func (c *Finspace) ListTagsForResourceRequest(input *ListTagsForResourceInput) (req *request.Request, output *ListTagsForResourceOutput) {
op := &request.Operation{
Name: opListTagsForResource,
HTTPMethod: "GET",
HTTPPath: "/tags/{resourceArn}",
}
if input == nil {
input = &ListTagsForResourceInput{}
}
output = &ListTagsForResourceOutput{}
req = c.newRequest(op, input, output)
return
} | func (c *OAM) ListTagsForResourceRequest(input *ListTagsForResourceInput) (req *request.Request, output *ListTagsForResourceOutput) {
op := &request.Operation{
Name: opListTagsForResource,
HTTPMethod: "GET",
HTTPPath: "/tags/{ResourceArn}",
}
if input == nil {
input = &ListTagsForResourceInput{}
}
output = &ListTagsForResourceOutput{}
req = c.newRequest(op, input, output)
return
} | 0.997505 | aws/aws-sdk-go | service/finspace/api.go | aws/aws-sdk-go | service/oam/api.go | Apache-2.0 | go |
ListTagsForResourceRequest generates a "aws/request.Request" representing the
client's request for the ListTagsForResource operation. The "output" return
value will be populated with the request's response once the request completes
successfully.
Use "Send" method on the returned Request to send the API call to the service.
the "output" return value is not valid until after Send returns without error.
See ListTagsForResource for more information on using the ListTagsForResource
API call, and error handling.
This method is useful when you want to inject custom logic or configuration
into the SDK's request lifecycle. Such as custom headers, or retry logic.
// Example sending a request using the ListTagsForResourceRequest method.
req, resp := client.ListTagsForResourceRequest(params)
err := req.Send()
if err == nil { // resp is now filled
fmt.Println(resp)
}
See also, https://docs.aws.amazon.com/goto/WebAPI/finspace-2021-03-12/ListTagsForResource | func (c *Finspace) ListTagsForResourceRequest(input *ListTagsForResourceInput) (req *request.Request, output *ListTagsForResourceOutput) {
op := &request.Operation{
Name: opListTagsForResource,
HTTPMethod: "GET",
HTTPPath: "/tags/{resourceArn}",
}
if input == nil {
input = &ListTagsForResourceInput{}
}
output = &ListTagsForResourceOutput{}
req = c.newRequest(op, input, output)
return
} | func (c *RecycleBin) ListTagsForResourceRequest(input *ListTagsForResourceInput) (req *request.Request, output *ListTagsForResourceOutput) {
op := &request.Operation{
Name: opListTagsForResource,
HTTPMethod: "GET",
HTTPPath: "/tags/{resourceArn}",
}
if input == nil {
input = &ListTagsForResourceInput{}
}
output = &ListTagsForResourceOutput{}
req = c.newRequest(op, input, output)
return
} | 0.997302 | aws/aws-sdk-go | service/finspace/api.go | aws/aws-sdk-go | service/recyclebin/api.go | Apache-2.0 | go |
ListTagsForResourceRequest generates a "aws/request.Request" representing the
client's request for the ListTagsForResource operation. The "output" return
value will be populated with the request's response once the request completes
successfully.
Use "Send" method on the returned Request to send the API call to the service.
the "output" return value is not valid until after Send returns without error.
See ListTagsForResource for more information on using the ListTagsForResource
API call, and error handling.
This method is useful when you want to inject custom logic or configuration
into the SDK's request lifecycle. Such as custom headers, or retry logic.
// Example sending a request using the ListTagsForResourceRequest method.
req, resp := client.ListTagsForResourceRequest(params)
err := req.Send()
if err == nil { // resp is now filled
fmt.Println(resp)
}
See also, https://docs.aws.amazon.com/goto/WebAPI/finspace-2021-03-12/ListTagsForResource | func (c *Finspace) ListTagsForResourceRequest(input *ListTagsForResourceInput) (req *request.Request, output *ListTagsForResourceOutput) {
op := &request.Operation{
Name: opListTagsForResource,
HTTPMethod: "GET",
HTTPPath: "/tags/{resourceArn}",
}
if input == nil {
input = &ListTagsForResourceInput{}
}
output = &ListTagsForResourceOutput{}
req = c.newRequest(op, input, output)
return
} | func (c *Omics) ListTagsForResourceRequest(input *ListTagsForResourceInput) (req *request.Request, output *ListTagsForResourceOutput) {
op := &request.Operation{
Name: opListTagsForResource,
HTTPMethod: "GET",
HTTPPath: "/tags/{resourceArn}",
}
if input == nil {
input = &ListTagsForResourceInput{}
}
output = &ListTagsForResourceOutput{}
req = c.newRequest(op, input, output)
req.Handlers.Build.PushBackNamed(protocol.NewHostPrefixHandler("tags-", nil))
req.Handlers.Build.PushBackNamed(protocol.ValidateEndpointHostHandler)
return
} | 0.997276 | aws/aws-sdk-go | service/finspace/api.go | aws/aws-sdk-go | service/omics/api.go | Apache-2.0 | go |
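The Request-builder docstrings above point out that building the request separately from sending it leaves room to inject custom behaviour such as headers or retry logic. A sketch of that flow against the FinSpace ListTagsForResourceRequest shown above — the ARN and header values are placeholders, and the output is assumed to expose a Tags map:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/finspace"
)

func main() {
	sess := session.Must(session.NewSession(aws.NewConfig().WithRegion("us-east-1")))
	svc := finspace.New(sess)

	// Build the request first so custom behaviour can be attached before it is sent.
	req, resp := svc.ListTagsForResourceRequest(&finspace.ListTagsForResourceInput{
		ResourceArn: aws.String("arn:aws:finspace:us-east-1:123456789012:environment/example"), // placeholder ARN
	})
	req.HTTPRequest.Header.Set("X-Example-Trace", "1") // illustrative custom header

	if err := req.Send(); err != nil {
		fmt.Println("ListTagsForResource failed:", err)
		return
	}
	// resp is only populated once Send has returned without error.
	fmt.Println(aws.StringValueMap(resp.Tags))
}
```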
GetBotsWithContext is the same as GetBots with the addition of
the ability to pass a context and additional request options.
See GetBots for details on how to use this API operation.
The context must be non-nil and will be used for request cancellation. If
the context is nil a panic will occur. In the future the SDK may create
sub-contexts for http.Requests. See https://golang.org/pkg/context/
for more information on using Contexts. | func (c *LexModelBuildingService) GetBotsWithContext(ctx aws.Context, input *GetBotsInput, opts ...request.Option) (*GetBotsOutput, error) {
req, out := c.GetBotsRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
} | func (c *Chime) GetBotWithContext(ctx aws.Context, input *GetBotInput, opts ...request.Option) (*GetBotOutput, error) {
req, out := c.GetBotRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
} | 0.928757 | aws/aws-sdk-go | service/lexmodelbuildingservice/api.go | aws/aws-sdk-go | service/chime/api.go | Apache-2.0 | go |
GetBotsWithContext is the same as GetBots with the addition of
the ability to pass a context and additional request options.
See GetBots for details on how to use this API operation.
The context must be non-nil and will be used for request cancellation. If
the context is nil a panic will occur. In the future the SDK may create
sub-contexts for http.Requests. See https://golang.org/pkg/context/
for more information on using Contexts. | func (c *LexModelBuildingService) GetBotsWithContext(ctx aws.Context, input *GetBotsInput, opts ...request.Option) (*GetBotsOutput, error) {
req, out := c.GetBotsRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
} | func (c *LexModelBuildingService) GetBotsPagesWithContext(ctx aws.Context, input *GetBotsInput, fn func(*GetBotsOutput, bool) bool, opts ...request.Option) error {
p := request.Pagination{
NewRequest: func() (*request.Request, error) {
var inCpy *GetBotsInput
if input != nil {
tmp := *input
inCpy = &tmp
}
req, _ := c.GetBotsRequest(inCpy)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return req, nil
},
}
for p.Next() {
if !fn(p.Page().(*GetBotsOutput), !p.HasNextPage()) {
break
}
}
return p.Err()
} | 0.909073 | aws/aws-sdk-go | service/lexmodelbuildingservice/api.go | aws/aws-sdk-go | service/lexmodelbuildingservice/api.go | Apache-2.0 | go |
GetBotsWithContext is the same as GetBots with the addition of
the ability to pass a context and additional request options.
See GetBots for details on how to use this API operation.
The context must be non-nil and will be used for request cancellation. If
the context is nil a panic will occur. In the future the SDK may create
sub-contexts for http.Requests. See https://golang.org/pkg/context/
for more information on using Contexts. | func (c *LexModelBuildingService) GetBotsWithContext(ctx aws.Context, input *GetBotsInput, opts ...request.Option) (*GetBotsOutput, error) {
req, out := c.GetBotsRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
} | func (c *LexModelBuildingService) GetBotAliasWithContext(ctx aws.Context, input *GetBotAliasInput, opts ...request.Option) (*GetBotAliasOutput, error) {
req, out := c.GetBotAliasRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
} | 0.847477 | aws/aws-sdk-go | service/lexmodelbuildingservice/api.go | aws/aws-sdk-go | service/lexmodelbuildingservice/api.go | Apache-2.0 | go |
GetBotsWithContext is the same as GetBots with the addition of
the ability to pass a context and additional request options.
See GetBots for details on how to use this API operation.
The context must be non-nil and will be used for request cancellation. If
the context is nil a panic will occur. In the future the SDK may create
sub-contexts for http.Requests. See https://golang.org/pkg/context/
for more information on using Contexts. | func (c *LexModelBuildingService) GetBotsWithContext(ctx aws.Context, input *GetBotsInput, opts ...request.Option) (*GetBotsOutput, error) {
req, out := c.GetBotsRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
} | func (c *LexModelBuildingService) GetBotVersionsWithContext(ctx aws.Context, input *GetBotVersionsInput, opts ...request.Option) (*GetBotVersionsOutput, error) {
req, out := c.GetBotVersionsRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
} | 0.839976 | aws/aws-sdk-go | service/lexmodelbuildingservice/api.go | aws/aws-sdk-go | service/lexmodelbuildingservice/api.go | Apache-2.0 | go |
GetBotsWithContext is the same as GetBots with the addition of
the ability to pass a context and additional request options.
See GetBots for details on how to use this API operation.
The context must be non-nil and will be used for request cancellation. If
the context is nil a panic will occur. In the future the SDK may create
sub-contexts for http.Requests. See https://golang.org/pkg/context/
for more information on using Contexts. | func (c *LexModelBuildingService) GetBotsWithContext(ctx aws.Context, input *GetBotsInput, opts ...request.Option) (*GetBotsOutput, error) {
req, out := c.GetBotsRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
} | func (c *Chime) ListBotsWithContext(ctx aws.Context, input *ListBotsInput, opts ...request.Option) (*ListBotsOutput, error) {
req, out := c.ListBotsRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
} | 0.838212 | aws/aws-sdk-go | service/lexmodelbuildingservice/api.go | aws/aws-sdk-go | service/chime/api.go | Apache-2.0 | go |
Encode takes in a list of uids and a block size. It would pack these uids into blocks of the
given size, with the last block having fewer uids. Within each block, it stores the first uid as
base. For each next uid, a delta = uids[i] - uids[i-1] is stored. Protobuf uses Varint encoding,
as mentioned here: https://developers.google.com/protocol-buffers/docs/encoding . This ensures
that the deltas being considerably smaller than the original uids are nicely packed in fewer
bytes. Our benchmarks on artificial data show compressed size to be 13% of the original. This
mechanism is a LOT simpler to understand and if needed, debug. | func Encode(uids []uint64, blockSize int) *pb.UidPack {
enc := Encoder{BlockSize: blockSize}
for _, uid := range uids {
enc.Add(uid)
}
return enc.Done()
} | func encodeCount(i int) uint8 {
if i < 65536 || i > 65011712 {
panic("count arg i outside the required range")
}
for encoded := 96; encoded < 256; encoded++ {
count := decodeCount(uint8(encoded))
if count >= i {
return uint8(encoded)
}
}
return 255
} | 0.592054 | hypermodeinc/dgraph | codec/codec.go | tektoncd/cli | vendor/github.com/ProtonMail/go-crypto/openpgp/s2k/s2k.go | Apache-2.0 | go |
Encode takes in a list of uids and a block size. It would pack these uids into blocks of the
given size, with the last block having fewer uids. Within each block, it stores the first uid as
base. For each next uid, a delta = uids[i] - uids[i-1] is stored. Protobuf uses Varint encoding,
as mentioned here: https://developers.google.com/protocol-buffers/docs/encoding . This ensures
that the deltas being considerably smaller than the original uids are nicely packed in fewer
bytes. Our benchmarks on artificial data show compressed size to be 13% of the original. This
mechanism is a LOT simpler to understand and if needed, debug. | func Encode(uids []uint64, blockSize int) *pb.UidPack {
enc := Encoder{BlockSize: blockSize}
for _, uid := range uids {
enc.Add(uid)
}
return enc.Done()
} | func Encode(dst, src []byte) []byte {
if n := MaxEncodedLen(len(src)); n < 0 {
panic(ErrTooLarge)
} else if len(dst) < n {
dst = make([]byte, n)
}
// The block starts with the varint-encoded length of the decompressed bytes.
d := binary.PutUvarint(dst, uint64(len(src)))
for len(src) > 0 {
p := src
src = nil
if len(p) > maxBlockSize {
p, src = p[:maxBlockSize], p[maxBlockSize:]
}
if len(p) < minNonLiteralBlockSize {
d += emitLiteral(dst[d:], p)
} else {
d += encodeBlock(dst[d:], p)
}
}
return dst[:d]
} | 0.5859 | hypermodeinc/dgraph | codec/codec.go | tektoncd/cli | vendor/github.com/golang/snappy/encode.go | Apache-2.0 | go |
Encode takes in a list of uids and a block size. It would pack these uids into blocks of the
given size, with the last block having fewer uids. Within each block, it stores the first uid as
base. For each next uid, a delta = uids[i] - uids[i-1] is stored. Protobuf uses Varint encoding,
as mentioned here: https://developers.google.com/protocol-buffers/docs/encoding . This ensures
that the deltas being considerably smaller than the original uids are nicely packed in fewer
bytes. Our benchmarks on artificial data show compressed size to be 13% of the original. This
mechanism is a LOT simpler to understand and if needed, debug. | func Encode(uids []uint64, blockSize int) *pb.UidPack {
enc := Encoder{BlockSize: blockSize}
for _, uid := range uids {
enc.Add(uid)
}
return enc.Done()
} | func (q *QRCode) encode(numTerminatorBits int) {
q.addTerminatorBits(numTerminatorBits)
q.addPadding()
encoded := q.encodeBlocks()
const numMasks int = 8
penalty := 0
for mask := 0; mask < numMasks; mask++ {
var s *symbol
var err error
s, err = buildRegularSymbol(q.version, mask, encoded)
if err != nil {
log.Panic(err.Error())
}
numEmptyModules := s.numEmptyModules()
if numEmptyModules != 0 {
log.Panicf("bug: numEmptyModules is %d (expected 0) (version=%d)",
numEmptyModules, q.VersionNumber)
}
p := s.penaltyScore()
//log.Printf("mask=%d p=%3d p1=%3d p2=%3d p3=%3d p4=%d\n", mask, p, s.penalty1(), s.penalty2(), s.penalty3(), s.penalty4())
if q.symbol == nil || p < penalty {
q.symbol = s
q.mask = mask
penalty = p
}
}
} | 0.583968 | hypermodeinc/dgraph | codec/codec.go | 42wim/matterbridge | vendor/github.com/skip2/go-qrcode/qrcode.go | Apache-2.0 | go |
Encode takes in a list of uids and a block size. It would pack these uids into blocks of the
given size, with the last block having fewer uids. Within each block, it stores the first uid as
base. For each next uid, a delta = uids[i] - uids[i-1] is stored. Protobuf uses Varint encoding,
as mentioned here: https://developers.google.com/protocol-buffers/docs/encoding . This ensures
that the deltas being considerably smaller than the original uids are nicely packed in fewer
bytes. Our benchmarks on artificial data show compressed size to be 13% of the original. This
mechanism is a LOT simpler to understand and if needed, debug. | func Encode(uids []uint64, blockSize int) *pb.UidPack {
enc := Encoder{BlockSize: blockSize}
for _, uid := range uids {
enc.Add(uid)
}
return enc.Done()
} | func encodeBlock(dst, src []byte) (d int) {
race.ReadSlice(src)
race.WriteSlice(dst)
const (
// Use 12 bit table when less than...
limit12B = 16 << 10
// Use 10 bit table when less than...
limit10B = 4 << 10
// Use 8 bit table when less than...
limit8B = 512
)
if len(src) >= 4<<20 {
const sz, pool = 65536, 0
tmp, ok := encPools[pool].Get().(*[sz]byte)
if !ok {
tmp = &[sz]byte{}
}
race.WriteSlice(tmp[:])
defer encPools[pool].Put(tmp)
return encodeBlockAsm(dst, src, tmp)
}
if len(src) >= limit12B {
const sz, pool = 65536, 0
tmp, ok := encPools[pool].Get().(*[sz]byte)
if !ok {
tmp = &[sz]byte{}
}
race.WriteSlice(tmp[:])
defer encPools[pool].Put(tmp)
return encodeBlockAsm4MB(dst, src, tmp)
}
if len(src) >= limit10B {
const sz, pool = 16384, 1
tmp, ok := encPools[pool].Get().(*[sz]byte)
if !ok {
tmp = &[sz]byte{}
}
race.WriteSlice(tmp[:])
defer encPools[pool].Put(tmp)
return encodeBlockAsm12B(dst, src, tmp)
}
if len(src) >= limit8B {
const sz, pool = 4096, 2
tmp, ok := encPools[pool].Get().(*[sz]byte)
if !ok {
tmp = &[sz]byte{}
}
race.WriteSlice(tmp[:])
defer encPools[pool].Put(tmp)
return encodeBlockAsm10B(dst, src, tmp)
}
if len(src) < minNonLiteralBlockSize {
return 0
}
const sz, pool = 1024, 3
tmp, ok := encPools[pool].Get().(*[sz]byte)
if !ok {
tmp = &[sz]byte{}
}
race.WriteSlice(tmp[:])
defer encPools[pool].Put(tmp)
return encodeBlockAsm8B(dst, src, tmp)
} | 0.577692 | hypermodeinc/dgraph | codec/codec.go | go-graphite/go-carbon | vendor/github.com/klauspost/compress/s2/encode_amd64.go | MIT | go |
Encode takes in a list of uids and a block size. It would pack these uids into blocks of the
given size, with the last block having fewer uids. Within each block, it stores the first uid as
base. For each next uid, a delta = uids[i] - uids[i-1] is stored. Protobuf uses Varint encoding,
as mentioned here: https://developers.google.com/protocol-buffers/docs/encoding . This ensures
that the deltas being considerably smaller than the original uids are nicely packed in fewer
bytes. Our benchmarks on artificial data show compressed size to be 13% of the original. This
mechanism is a LOT simpler to understand and if needed, debug. | func Encode(uids []uint64, blockSize int) *pb.UidPack {
enc := Encoder{BlockSize: blockSize}
for _, uid := range uids {
enc.Add(uid)
}
return enc.Done()
} | func (hw *HashWheel) Encode(highSeq uint64) []byte {
b := make([]byte, 0, headerLen+(hw.count*(2*binary.MaxVarintLen64)))
b = append(b, 1) // Magic version
b = binary.LittleEndian.AppendUint64(b, hw.count) // Entry count
b = binary.LittleEndian.AppendUint64(b, highSeq) // Stamp
for _, slot := range hw.wheel {
if slot == nil || slot.entries == nil {
continue
}
for v, ts := range slot.entries {
b = binary.AppendVarint(b, ts)
b = binary.AppendUvarint(b, v)
}
}
return b
} | 0.574761 | hypermodeinc/dgraph | codec/codec.go | nats-io/nats-server | server/thw/thw.go | Apache-2.0 | go |
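The Encode docstring above rests on two ideas: keep the first uid of each block as a base, then store every following uid as a varint-encoded delta, which stays small for sorted uid lists. A standalone sketch of that idea using only encoding/binary — this is not dgraph's UidPack implementation, and the per-block splitting is left out:

```go
package main

import (
	"encoding/binary"
	"fmt"
)

// encodeDeltas writes the first uid as-is and each later uid as a delta from
// its predecessor; uids are assumed to be sorted in ascending order.
func encodeDeltas(uids []uint64) []byte {
	buf := make([]byte, 0, len(uids)*binary.MaxVarintLen64)
	var prev uint64
	for i, uid := range uids {
		if i == 0 {
			buf = binary.AppendUvarint(buf, uid) // base
		} else {
			buf = binary.AppendUvarint(buf, uid-prev) // small delta, few varint bytes
		}
		prev = uid
	}
	return buf
}

// decodeDeltas reverses encodeDeltas by accumulating deltas onto the base.
func decodeDeltas(b []byte) []uint64 {
	var out []uint64
	var prev uint64
	for len(b) > 0 {
		v, n := binary.Uvarint(b)
		if n <= 0 {
			break // malformed input; stop rather than loop
		}
		b = b[n:]
		if len(out) == 0 {
			prev = v
		} else {
			prev += v
		}
		out = append(out, prev)
	}
	return out
}

func main() {
	uids := []uint64{100, 105, 107, 300}
	enc := encodeDeltas(uids)
	fmt.Println(len(enc), "bytes ->", decodeDeltas(enc))
}
```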
GetIPAddressClaimLabels returns a labels map from the prow environment variables
BUILD_ID and JOB_NAME. If neither is set, it falls back to adding a custom random
label. | func GetIPAddressClaimLabels() map[string]string {
labels := map[string]string{}
if val := os.Getenv("BUILD_ID"); val != "" {
labels["prow.k8s.io/build-id"] = val
}
if val := os.Getenv("REPO_NAME"); val != "" {
labels["prow.k8s.io/repo-name"] = val
}
if len(labels) == 0 {
// Adding a custom label so we don't accidentally cleanup other IPAddressClaims
labels["capv-testing/random-uid"] = rand.String(32)
}
return labels
} | func (t *MultiTSDB) extractTenantsLabels(tenantID string, initialLset labels.Labels) labels.Labels {
for _, hc := range t.hashringConfigs {
for _, tenant := range hc.Tenants {
if tenant != tenantID {
continue
}
return labelpb.ExtendSortedLabels(hc.ExternalLabels, initialLset)
}
}
return initialLset
} | 0.567178 | kubernetes-sigs/cluster-api-provider-vsphere | test/framework/ip/incluster.go | thanos-io/thanos | pkg/receive/multitsdb.go | Apache-2.0 | go |
GetIPAddressClaimLabels returns a labels map from the prow environment variables
BUILD_ID and JOB_NAME. If neither is set, it falls back to adding a custom random
label. | func GetIPAddressClaimLabels() map[string]string {
labels := map[string]string{}
if val := os.Getenv("BUILD_ID"); val != "" {
labels["prow.k8s.io/build-id"] = val
}
if val := os.Getenv("REPO_NAME"); val != "" {
labels["prow.k8s.io/repo-name"] = val
}
if len(labels) == 0 {
// Adding a custom label so we don't accidentally cleanup other IPAddressClaims
labels["capv-testing/random-uid"] = rand.String(32)
}
return labels
} | func GenericLabels() map[string]string {
return core.DefaultLabels(core.SessionID())
} | 0.54197 | kubernetes-sigs/cluster-api-provider-vsphere | test/framework/ip/incluster.go | umputun/tg-spam | vendor/github.com/testcontainers/testcontainers-go/generic.go | MIT | go |
GetIPAddressClaimLabels returns a labels map from the prow environment variables
BUILD_ID and JOB_NAME. If neither is set, it falls back to adding a custom random
label. | func GetIPAddressClaimLabels() map[string]string {
labels := map[string]string{}
if val := os.Getenv("BUILD_ID"); val != "" {
labels["prow.k8s.io/build-id"] = val
}
if val := os.Getenv("REPO_NAME"); val != "" {
labels["prow.k8s.io/repo-name"] = val
}
if len(labels) == 0 {
// Adding a custom label so we don't accidentally cleanup other IPAddressClaims
labels["capv-testing/random-uid"] = rand.String(32)
}
return labels
} | func clusterRoleVMLabels(ctx *vmware.ClusterContext, controlPlane bool) map[string]string {
result := map[string]string{
clusterSelectorKey: ctx.Cluster.Name,
legacyClusterSelectorKey: ctx.Cluster.Name,
}
if controlPlane {
result[nodeSelectorKey] = roleControlPlane
result[legacyNodeSelectorKey] = roleControlPlane
} else {
result[nodeSelectorKey] = roleNode
result[legacyNodeSelectorKey] = roleNode
}
return result
} | 0.53999 | kubernetes-sigs/cluster-api-provider-vsphere | test/framework/ip/incluster.go | kubernetes-sigs/cluster-api-provider-vsphere | pkg/services/vmoperator/control_plane_endpoint.go | Apache-2.0 | go |
GetIPAddressClaimLabels returns a labels map from the prow environment variables
BUILD_ID and JOB_NAME. If neither is set, it falls back to adding a custom random
label. | func GetIPAddressClaimLabels() map[string]string {
labels := map[string]string{}
if val := os.Getenv("BUILD_ID"); val != "" {
labels["prow.k8s.io/build-id"] = val
}
if val := os.Getenv("REPO_NAME"); val != "" {
labels["prow.k8s.io/repo-name"] = val
}
if len(labels) == 0 {
// Adding a custom label so we don't accidentally cleanup other IPAddressClaims
labels["capv-testing/random-uid"] = rand.String(32)
}
return labels
} | func InitLabels(options []string) (plabel string, mlabel string, retErr error) {
if !selinux.GetEnabled() {
return "", "", nil
}
processLabel, mountLabel := selinux.ContainerLabels()
if processLabel != "" {
defer func() {
if retErr != nil {
selinux.ReleaseLabel(mountLabel)
}
}()
pcon, err := selinux.NewContext(processLabel)
if err != nil {
return "", "", err
}
mcsLevel := pcon["level"]
mcon, err := selinux.NewContext(mountLabel)
if err != nil {
return "", "", err
}
for _, opt := range options {
if opt == "disable" {
selinux.ReleaseLabel(mountLabel)
return "", selinux.PrivContainerMountLabel(), nil
}
if i := strings.Index(opt, ":"); i == -1 {
return "", "", fmt.Errorf("Bad label option %q, valid options 'disable' or \n'user, role, level, type, filetype' followed by ':' and a value", opt)
}
con := strings.SplitN(opt, ":", 2)
if !validOptions[con[0]] {
return "", "", fmt.Errorf("Bad label option %q, valid options 'disable, user, role, level, type, filetype'", con[0])
}
if con[0] == "filetype" {
mcon["type"] = con[1]
continue
}
pcon[con[0]] = con[1]
if con[0] == "level" || con[0] == "user" {
mcon[con[0]] = con[1]
}
}
if pcon.Get() != processLabel {
if pcon["level"] != mcsLevel {
selinux.ReleaseLabel(processLabel)
}
processLabel = pcon.Get()
selinux.ReserveLabel(processLabel)
}
mountLabel = mcon.Get()
}
return processLabel, mountLabel, nil
} | 0.52967 | kubernetes-sigs/cluster-api-provider-vsphere | test/framework/ip/incluster.go | Mirantis/cri-dockerd | vendor/github.com/opencontainers/selinux/go-selinux/label/label_linux.go | Apache-2.0 | go |
GetIPAddressClaimLabels returns a labels map from the prow environment variables
BUILD_ID and JOB_NAME. If neither is set, it falls back to adding a custom random
label. | func GetIPAddressClaimLabels() map[string]string {
labels := map[string]string{}
if val := os.Getenv("BUILD_ID"); val != "" {
labels["prow.k8s.io/build-id"] = val
}
if val := os.Getenv("REPO_NAME"); val != "" {
labels["prow.k8s.io/repo-name"] = val
}
if len(labels) == 0 {
// Adding a custom label so we don't accidentally cleanup other IPAddressClaims
labels["capv-testing/random-uid"] = rand.String(32)
}
return labels
} | func HostnameGroupingKey() map[string]string {
hostname, err := os.Hostname()
if err != nil {
return map[string]string{"instance": "unknown"}
}
return map[string]string{"instance": hostname}
} | 0.525859 | kubernetes-sigs/cluster-api-provider-vsphere | test/framework/ip/incluster.go | genuinetools/binctr | vendor/github.com/prometheus/client_golang/prometheus/push/deprecated.go | MIT | go |
Returns a list of all Amazon S3 directory buckets owned by the authenticated
sender of the request. For more information about directory buckets, see [Directory buckets]in the
Amazon S3 User Guide.
Directory buckets - For directory buckets, you must make requests for this API
operation to the Regional endpoint. These endpoints support path-style requests
in the format https://s3express-control.region-code.amazonaws.com/bucket-name .
Virtual-hosted-style requests aren't supported. For more information about
endpoints in Availability Zones, see [Regional and Zonal endpoints for directory buckets in Availability Zones]in the Amazon S3 User Guide. For more
information about endpoints in Local Zones, see [Concepts for directory buckets in Local Zones]in the Amazon S3 User Guide.
Permissions You must have the s3express:ListAllMyDirectoryBuckets permission in
an IAM identity-based policy instead of a bucket policy. Cross-account access to
this API operation isn't supported. This operation can only be performed by the
Amazon Web Services account that owns the resource. For more information about
directory bucket policies and permissions, see [Amazon Web Services Identity and Access Management (IAM) for S3 Express One Zone]in the Amazon S3 User Guide.
HTTP Host header syntax Directory buckets - The HTTP Host header syntax is
s3express-control.region.amazonaws.com .
The BucketRegion response element is not part of the ListDirectoryBuckets
Response Syntax.
[Concepts for directory buckets in Local Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-lzs-for-directory-buckets.html
[Directory buckets]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-buckets-overview.html
[Regional and Zonal endpoints for directory buckets in Availability Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/endpoint-directory-buckets-AZ.html
[Amazon Web Services Identity and Access Management (IAM) for S3 Express One Zone]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam.html | func (c *Client) ListDirectoryBuckets(ctx context.Context, params *ListDirectoryBucketsInput, optFns ...func(*Options)) (*ListDirectoryBucketsOutput, error) {
if params == nil {
params = &ListDirectoryBucketsInput{}
}
result, metadata, err := c.invokeOperation(ctx, "ListDirectoryBuckets", params, optFns, c.addOperationListDirectoryBucketsMiddlewares)
if err != nil {
return nil, err
}
out := result.(*ListDirectoryBucketsOutput)
out.ResultMetadata = metadata
return out, nil
} | func (c *S3) ListDirectoryBuckets(input *ListDirectoryBucketsInput) (*ListDirectoryBucketsOutput, error) {
req, out := c.ListDirectoryBucketsRequest(input)
return out, req.Send()
} | 0.886357 | umputun/tg-spam | vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListDirectoryBuckets.go | aws/aws-sdk-go | service/s3/api.go | Apache-2.0 | go |
Returns a list of all Amazon S3 directory buckets owned by the authenticated
sender of the request. For more information about directory buckets, see [Directory buckets]in the
Amazon S3 User Guide.
Directory buckets - For directory buckets, you must make requests for this API
operation to the Regional endpoint. These endpoints support path-style requests
in the format https://s3express-control.region-code.amazonaws.com/bucket-name .
Virtual-hosted-style requests aren't supported. For more information about
endpoints in Availability Zones, see [Regional and Zonal endpoints for directory buckets in Availability Zones]in the Amazon S3 User Guide. For more
information about endpoints in Local Zones, see [Concepts for directory buckets in Local Zones]in the Amazon S3 User Guide.
Permissions You must have the s3express:ListAllMyDirectoryBuckets permission in
an IAM identity-based policy instead of a bucket policy. Cross-account access to
this API operation isn't supported. This operation can only be performed by the
Amazon Web Services account that owns the resource. For more information about
directory bucket policies and permissions, see [Amazon Web Services Identity and Access Management (IAM) for S3 Express One Zone]in the Amazon S3 User Guide.
HTTP Host header syntax Directory buckets - The HTTP Host header syntax is
s3express-control.region.amazonaws.com .
The BucketRegion response element is not part of the ListDirectoryBuckets
Response Syntax.
[Concepts for directory buckets in Local Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-lzs-for-directory-buckets.html
[Directory buckets]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-buckets-overview.html
[Regional and Zonal endpoints for directory buckets in Availability Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/endpoint-directory-buckets-AZ.html
[Amazon Web Services Identity and Access Management (IAM) for S3 Express One Zone]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam.html | func (c *Client) ListDirectoryBuckets(ctx context.Context, params *ListDirectoryBucketsInput, optFns ...func(*Options)) (*ListDirectoryBucketsOutput, error) {
if params == nil {
params = &ListDirectoryBucketsInput{}
}
result, metadata, err := c.invokeOperation(ctx, "ListDirectoryBuckets", params, optFns, c.addOperationListDirectoryBucketsMiddlewares)
if err != nil {
return nil, err
}
out := result.(*ListDirectoryBucketsOutput)
out.ResultMetadata = metadata
return out, nil
} | func (c *Client) ListBuckets(ctx context.Context, params *ListBucketsInput, optFns ...func(*Options)) (*ListBucketsOutput, error) {
if params == nil {
params = &ListBucketsInput{}
}
result, metadata, err := c.invokeOperation(ctx, "ListBuckets", params, optFns, c.addOperationListBucketsMiddlewares)
if err != nil {
return nil, err
}
out := result.(*ListBucketsOutput)
out.ResultMetadata = metadata
return out, nil
} | 0.883907 | umputun/tg-spam | vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListDirectoryBuckets.go | umputun/tg-spam | vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListBuckets.go | MIT | go |
Returns a list of all Amazon S3 directory buckets owned by the authenticated
sender of the request. For more information about directory buckets, see [Directory buckets]in the
Amazon S3 User Guide.
Directory buckets - For directory buckets, you must make requests for this API
operation to the Regional endpoint. These endpoints support path-style requests
in the format https://s3express-control.region-code.amazonaws.com/bucket-name .
Virtual-hosted-style requests aren't supported. For more information about
endpoints in Availability Zones, see [Regional and Zonal endpoints for directory buckets in Availability Zones]in the Amazon S3 User Guide. For more
information about endpoints in Local Zones, see [Concepts for directory buckets in Local Zones]in the Amazon S3 User Guide.
Permissions You must have the s3express:ListAllMyDirectoryBuckets permission in
an IAM identity-based policy instead of a bucket policy. Cross-account access to
this API operation isn't supported. This operation can only be performed by the
Amazon Web Services account that owns the resource. For more information about
directory bucket policies and permissions, see [Amazon Web Services Identity and Access Management (IAM) for S3 Express One Zone]in the Amazon S3 User Guide.
HTTP Host header syntax Directory buckets - The HTTP Host header syntax is
s3express-control.region.amazonaws.com .
The BucketRegion response element is not part of the ListDirectoryBuckets
Response Syntax.
[Concepts for directory buckets in Local Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-lzs-for-directory-buckets.html
[Directory buckets]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-buckets-overview.html
[Regional and Zonal endpoints for directory buckets in Availability Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/endpoint-directory-buckets-AZ.html
[Amazon Web Services Identity and Access Management (IAM) for S3 Express One Zone]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam.html | func (c *Client) ListDirectoryBuckets(ctx context.Context, params *ListDirectoryBucketsInput, optFns ...func(*Options)) (*ListDirectoryBucketsOutput, error) {
if params == nil {
params = &ListDirectoryBucketsInput{}
}
result, metadata, err := c.invokeOperation(ctx, "ListDirectoryBuckets", params, optFns, c.addOperationListDirectoryBucketsMiddlewares)
if err != nil {
return nil, err
}
out := result.(*ListDirectoryBucketsOutput)
out.ResultMetadata = metadata
return out, nil
} | func (c *AmplifyBackend) ListS3Buckets(input *ListS3BucketsInput) (*ListS3BucketsOutput, error) {
req, out := c.ListS3BucketsRequest(input)
return out, req.Send()
} | 0.757355 | umputun/tg-spam | vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListDirectoryBuckets.go | aws/aws-sdk-go | service/amplifybackend/api.go | Apache-2.0 | go |
Returns a list of all Amazon S3 directory buckets owned by the authenticated
sender of the request. For more information about directory buckets, see [Directory buckets]in the
Amazon S3 User Guide.
Directory buckets - For directory buckets, you must make requests for this API
operation to the Regional endpoint. These endpoints support path-style requests
in the format https://s3express-control.region-code.amazonaws.com/bucket-name .
Virtual-hosted-style requests aren't supported. For more information about
endpoints in Availability Zones, see [Regional and Zonal endpoints for directory buckets in Availability Zones]in the Amazon S3 User Guide. For more
information about endpoints in Local Zones, see [Concepts for directory buckets in Local Zones]in the Amazon S3 User Guide.
Permissions You must have the s3express:ListAllMyDirectoryBuckets permission in
an IAM identity-based policy instead of a bucket policy. Cross-account access to
this API operation isn't supported. This operation can only be performed by the
Amazon Web Services account that owns the resource. For more information about
directory bucket policies and permissions, see [Amazon Web Services Identity and Access Management (IAM) for S3 Express One Zone]in the Amazon S3 User Guide.
HTTP Host header syntax Directory buckets - The HTTP Host header syntax is
s3express-control.region.amazonaws.com .
The BucketRegion response element is not part of the ListDirectoryBuckets
Response Syntax.
[Concepts for directory buckets in Local Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-lzs-for-directory-buckets.html
[Directory buckets]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-buckets-overview.html
[Regional and Zonal endpoints for directory buckets in Availability Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/endpoint-directory-buckets-AZ.html
[Amazon Web Services Identity and Access Management (IAM) for S3 Express One Zone]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam.html | func (c *Client) ListDirectoryBuckets(ctx context.Context, params *ListDirectoryBucketsInput, optFns ...func(*Options)) (*ListDirectoryBucketsOutput, error) {
if params == nil {
params = &ListDirectoryBucketsInput{}
}
result, metadata, err := c.invokeOperation(ctx, "ListDirectoryBuckets", params, optFns, c.addOperationListDirectoryBucketsMiddlewares)
if err != nil {
return nil, err
}
out := result.(*ListDirectoryBucketsOutput)
out.ResultMetadata = metadata
return out, nil
} | func (c *S3) ListBuckets(input *ListBucketsInput) (*ListBucketsOutput, error) {
req, out := c.ListBucketsRequest(input)
return out, req.Send()
} | 0.755061 | umputun/tg-spam | vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListDirectoryBuckets.go | armory/spinnaker-operator | vendor/github.com/aws/aws-sdk-go/service/s3/api.go | Apache-2.0 | go |
Returns a list of all Amazon S3 directory buckets owned by the authenticated
sender of the request. For more information about directory buckets, see [Directory buckets]in the
Amazon S3 User Guide.
Directory buckets - For directory buckets, you must make requests for this API
operation to the Regional endpoint. These endpoints support path-style requests
in the format https://s3express-control.region-code.amazonaws.com/bucket-name .
Virtual-hosted-style requests aren't supported. For more information about
endpoints in Availability Zones, see [Regional and Zonal endpoints for directory buckets in Availability Zones]in the Amazon S3 User Guide. For more
information about endpoints in Local Zones, see [Concepts for directory buckets in Local Zones]in the Amazon S3 User Guide.
Permissions You must have the s3express:ListAllMyDirectoryBuckets permission in
an IAM identity-based policy instead of a bucket policy. Cross-account access to
this API operation isn't supported. This operation can only be performed by the
Amazon Web Services account that owns the resource. For more information about
directory bucket policies and permissions, see [Amazon Web Services Identity and Access Management (IAM) for S3 Express One Zone]in the Amazon S3 User Guide.
HTTP Host header syntax Directory buckets - The HTTP Host header syntax is
s3express-control.region.amazonaws.com .
The BucketRegion response element is not part of the ListDirectoryBuckets
Response Syntax.
[Concepts for directory buckets in Local Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-lzs-for-directory-buckets.html
[Directory buckets]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-buckets-overview.html
[Regional and Zonal endpoints for directory buckets in Availability Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/endpoint-directory-buckets-AZ.html
[Amazon Web Services Identity and Access Management (IAM) for S3 Express One Zone]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam.html | func (c *Client) ListDirectoryBuckets(ctx context.Context, params *ListDirectoryBucketsInput, optFns ...func(*Options)) (*ListDirectoryBucketsOutput, error) {
if params == nil {
params = &ListDirectoryBucketsInput{}
}
result, metadata, err := c.invokeOperation(ctx, "ListDirectoryBuckets", params, optFns, c.addOperationListDirectoryBucketsMiddlewares)
if err != nil {
return nil, err
}
out := result.(*ListDirectoryBucketsOutput)
out.ResultMetadata = metadata
return out, nil
} | func (c *Client) GetBucketPolicy(ctx context.Context, params *GetBucketPolicyInput, optFns ...func(*Options)) (*GetBucketPolicyOutput, error) {
if params == nil {
params = &GetBucketPolicyInput{}
}
result, metadata, err := c.invokeOperation(ctx, "GetBucketPolicy", params, optFns, c.addOperationGetBucketPolicyMiddlewares)
if err != nil {
return nil, err
}
out := result.(*GetBucketPolicyOutput)
out.ResultMetadata = metadata
return out, nil
} | 0.752382 | umputun/tg-spam | vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListDirectoryBuckets.go | umputun/tg-spam | vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketPolicy.go | MIT | go |
maintainInstallationFinalizer manages this controller's finalizer on the Installation resource.
We add a finalizer to the Installation when the API server has been installed, and only remove that finalizer when
the API server has been deleted and its pods have stopped running. This allows for a graceful cleanup of API server resources
prior to the CNI plugin being removed. | func maintainInstallationFinalizer(ctx context.Context, c client.Client, apiserver *operatorv1.APIServer) error {
// Get the Installation.
installation := &operatorv1.Installation{}
if err := c.Get(ctx, utils.DefaultInstanceKey, installation); err != nil {
if errors.IsNotFound(err) {
log.V(1).Info("Installation config not found")
return nil
}
log.Error(err, "An error occurred when querying the Installation resource")
return err
}
patchFrom := client.MergeFrom(installation.DeepCopy())
// Determine the correct finalizers to apply to the Installation. If the APIServer exists, we should apply
// a finalizer. Otherwise, if the API server namespace doesn't exist we should remove it. This ensures the finalizer
// is always present so long as the resources managed by this controller exist in the cluster.
if apiserver != nil {
// Add a finalizer indicating that the API server is still running.
utils.SetInstallationFinalizer(installation, render.APIServerFinalizer)
} else {
// Check if the API server namespace exists, and remove the finalizer if not. Gating this on Namespace removal
// is the best way to approximate that all API server related resources have been removed.
l := &corev1.Namespace{}
err := c.Get(ctx, types.NamespacedName{Name: rmeta.APIServerNamespace(installation.Spec.Variant)}, l)
if err != nil && !errors.IsNotFound(err) {
return err
} else if errors.IsNotFound(err) {
log.Info("API server Namespace does not exist, removing finalizer", "finalizer", render.APIServerFinalizer)
utils.RemoveInstallationFinalizer(installation, render.APIServerFinalizer)
} else {
log.Info("API server Namespace is still present, waiting for termination")
}
}
// Update the installation with any finalizer changes.
return c.Patch(ctx, installation, patchFrom)
} | func (pt *PackageTool) RunInstall(opts *PackageOptions) error {
if opts.Version == "v0.0.0-latest" {
warningMarker := color.New(color.FgYellow).Add(color.Bold).Sprintf("WARNING")
pt.logger.Println(fmt.Sprintf("%s: Currently using the latest version of envoy gateway chart, it is recommended to use the fixed version",
warningMarker))
}
pt.setCommonValue()
egChart, err := pt.loadChart(opts)
if err != nil {
return err
}
crdInfo, err := pt.extractCRDs(egChart)
if err != nil {
return err
}
// Before we install CRDs, we need to ensure that CRDs do not exist in the cluster
// After we install CRDs, we need to ensure that the CRDs are successfully installed into the cluster
if !opts.SkipCRD && !opts.DryRun {
if len(crdInfo) == 0 {
return fmt.Errorf("CRDs not found in the envoy gateway chart")
}
if exist, err := detectExistCRDs(crdInfo); exist == nil || *exist {
if err == nil {
err = fmt.Errorf("found installed envoy gateway CRDs and gateway api CRDs, unable to continue installation")
}
return err
}
if err := installCRDs(crdInfo, pt.actionConfig); err != nil {
return err
}
if exist, err := detectExistCRDs(crdInfo); exist == nil || !*exist {
if err != nil {
return fmt.Errorf("failed to install CRDs of envoy gateway")
}
return err
}
if opts.OnlyCRD {
return nil
}
}
// Merge all values flag
providers := getter.All(pt.envSettings)
egChartValues, err := pt.valuesOpts.MergeValues(providers)
if err != nil {
return err
}
pt.setInstallOptions(opts)
release, err := pt.actionInstall.Run(egChart, egChartValues)
if err != nil {
return fmt.Errorf("failed to install envoy gateway resource: %w", err)
}
if len(opts.Output) != 0 {
var outputErr error
var outputFile *os.File
if outputFile, outputErr = os.OpenFile(opts.Output, os.O_CREATE|os.O_RDWR|os.O_APPEND, 0o666); outputErr == nil {
_, outputErr = fmt.Fprint(outputFile, release.Manifest)
}
if outputErr != nil {
pt.logger.Println(fmt.Errorf("failed to output manifests to specified file: %w", outputErr).Error())
}
}
if opts.DryRun {
pt.logger.Println(release.Manifest)
return nil
}
successMarker := color.New(color.FgGreen).Add(color.Bold).Sprintf("SUCCESS")
pt.logger.Println(fmt.Sprintf("%s: Envoy gateway installed", successMarker))
return nil
} | 0.585031 | tigera/operator | pkg/controller/apiserver/apiserver_controller.go | envoyproxy/gateway | internal/utils/helm/package.go | Apache-2.0 | go |
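The maintainInstallationFinalizer rows repeat one pattern: keep a finalizer on the Installation while the API server exists, and drop it once the API server namespace is gone so deletion of the Installation can complete. The helpers it calls (utils.SetInstallationFinalizer / utils.RemoveInstallationFinalizer) are not shown in the dataset; the sketch below is a hypothetical, generic version of such helpers for any Kubernetes object, added only to illustrate the mechanism, and is not the operator's actual implementation.

```go
package finalizerutil

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// SetFinalizer adds name to the object's finalizer list if it is not already
// present. While any finalizer is set, the API server will not fully delete
// the object, giving the controller time to clean up dependent resources.
func SetFinalizer(obj metav1.Object, name string) {
	for _, f := range obj.GetFinalizers() {
		if f == name {
			return // already present, nothing to do
		}
	}
	obj.SetFinalizers(append(obj.GetFinalizers(), name))
}

// RemoveFinalizer deletes name from the object's finalizer list. Once all
// finalizers are cleared, deletion of the object can complete.
func RemoveFinalizer(obj metav1.Object, name string) {
	var kept []string
	for _, f := range obj.GetFinalizers() {
		if f != name {
			kept = append(kept, f)
		}
	}
	obj.SetFinalizers(kept)
}
```

Either helper only mutates the in-memory object; as in the snippet above, the change still has to be persisted, for example with client.Patch using a client.MergeFrom patch taken before the mutation.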
maintainInstallationFinalizer manages this controller's finalizer on the Installation resource.
We add a finalizer to the Installation when the API server has been installed, and only remove that finalizer when
the API server has been deleted and its pods have stopped running. This allows for a graceful cleanup of API server resources
prior to the CNI plugin being removed. | func maintainInstallationFinalizer(ctx context.Context, c client.Client, apiserver *operatorv1.APIServer) error {
// Get the Installation.
installation := &operatorv1.Installation{}
if err := c.Get(ctx, utils.DefaultInstanceKey, installation); err != nil {
if errors.IsNotFound(err) {
log.V(1).Info("Installation config not found")
return nil
}
log.Error(err, "An error occurred when querying the Installation resource")
return err
}
patchFrom := client.MergeFrom(installation.DeepCopy())
// Determine the correct finalizers to apply to the Installation. If the APIServer exists, we should apply
// a finalizer. Otherwise, if the API server namespace doesn't exist we should remove it. This ensures the finalizer
// is always present so long as the resources managed by this controller exist in the cluster.
if apiserver != nil {
// Add a finalizer indicating that the API server is still running.
utils.SetInstallationFinalizer(installation, render.APIServerFinalizer)
} else {
// Check if the API server namespace exists, and remove the finalizer if not. Gating this on Namespace removal
// is the best way to approximate that all API server related resources have been removed.
l := &corev1.Namespace{}
err := c.Get(ctx, types.NamespacedName{Name: rmeta.APIServerNamespace(installation.Spec.Variant)}, l)
if err != nil && !errors.IsNotFound(err) {
return err
} else if errors.IsNotFound(err) {
log.Info("API server Namespace does not exist, removing finalizer", "finalizer", render.APIServerFinalizer)
utils.RemoveInstallationFinalizer(installation, render.APIServerFinalizer)
} else {
log.Info("API server Namespace is still present, waiting for termination")
}
}
// Update the installation with any finalizer changes.
return c.Patch(ctx, installation, patchFrom)
} | func cleanupInstallation(p environment.Paths, plugin index.Plugin, oldVersion string) error {
if plugin.Name == constants.KrewPluginName && IsWindows() {
klog.V(1).Infof("not removing old version of krew during upgrade on windows (should be cleaned up on the next run)")
return nil
}
klog.V(1).Infof("Remove old plugin installation under %q", p.PluginVersionInstallPath(plugin.Name, oldVersion))
return os.RemoveAll(p.PluginVersionInstallPath(plugin.Name, oldVersion))
} | 0.544487 | tigera/operator | pkg/controller/apiserver/apiserver_controller.go | kubernetes-sigs/krew | internal/installation/upgrade.go | Apache-2.0 | go |
maintainInstallationFinalizer manages this controller's finalizer on the Installation resource.
We add a finalizer to the Installation when the API server has been installed, and only remove that finalizer when
the API server has been deleted and its pods have stopped running. This allows for a graceful cleanup of API server resources
prior to the CNI plugin being removed. | func maintainInstallationFinalizer(ctx context.Context, c client.Client, apiserver *operatorv1.APIServer) error {
// Get the Installation.
installation := &operatorv1.Installation{}
if err := c.Get(ctx, utils.DefaultInstanceKey, installation); err != nil {
if errors.IsNotFound(err) {
log.V(1).Info("Installation config not found")
return nil
}
log.Error(err, "An error occurred when querying the Installation resource")
return err
}
patchFrom := client.MergeFrom(installation.DeepCopy())
// Determine the correct finalizers to apply to the Installation. If the APIServer exists, we should apply
// a finalizer. Otherwise, if the API server namespace doesn't exist we should remove it. This ensures the finalizer
// is always present so long as the resources managed by this controller exist in the cluster.
if apiserver != nil {
// Add a finalizer indicating that the API server is still running.
utils.SetInstallationFinalizer(installation, render.APIServerFinalizer)
} else {
// Check if the API server namespace exists, and remove the finalizer if not. Gating this on Namespace removal
// is the best way to approximate that all API server related resources have been removed.
l := &corev1.Namespace{}
err := c.Get(ctx, types.NamespacedName{Name: rmeta.APIServerNamespace(installation.Spec.Variant)}, l)
if err != nil && !errors.IsNotFound(err) {
return err
} else if errors.IsNotFound(err) {
log.Info("API server Namespace does not exist, removing finalizer", "finalizer", render.APIServerFinalizer)
utils.RemoveInstallationFinalizer(installation, render.APIServerFinalizer)
} else {
log.Info("API server Namespace is still present, waiting for termination")
}
}
// Update the installation with any finalizer changes.
return c.Patch(ctx, installation, patchFrom)
} | func AddCleanupAction(fn func()) CleanupActionHandle {
p := CleanupActionHandle(new(int))
cleanupActionsLock.Lock()
defer cleanupActionsLock.Unlock()
c := cleanupFuncHandle{actionHandle: p, actionHook: fn}
cleanupHookList = append([]cleanupFuncHandle{c}, cleanupHookList...)
return p
} | 0.526304 | tigera/operator | pkg/controller/apiserver/apiserver_controller.go | fatedier/frp | test/e2e/framework/cleanup.go | Apache-2.0 | go |
maintainInstallationFinalizer manages this controller's finalizer on the Installation resource.
We add a finalizer to the Installation when the API server has been installed, and only remove that finalizer when
the API server has been deleted and its pods have stopped running. This allows for a graceful cleanup of API server resources
prior to the CNI plugin being removed. | func maintainInstallationFinalizer(ctx context.Context, c client.Client, apiserver *operatorv1.APIServer) error {
// Get the Installation.
installation := &operatorv1.Installation{}
if err := c.Get(ctx, utils.DefaultInstanceKey, installation); err != nil {
if errors.IsNotFound(err) {
log.V(1).Info("Installation config not found")
return nil
}
log.Error(err, "An error occurred when querying the Installation resource")
return err
}
patchFrom := client.MergeFrom(installation.DeepCopy())
// Determine the correct finalizers to apply to the Installation. If the APIServer exists, we should apply
// a finalizer. Otherwise, if the API server namespace doesn't exist we should remove it. This ensures the finalizer
// is always present so long as the resources managed by this controller exist in the cluster.
if apiserver != nil {
// Add a finalizer indicating that the API server is still running.
utils.SetInstallationFinalizer(installation, render.APIServerFinalizer)
} else {
// Check if the API server namespace exists, and remove the finalizer if not. Gating this on Namespace removal
// is the best way to approximate that all API server related resources have been removed.
l := &corev1.Namespace{}
err := c.Get(ctx, types.NamespacedName{Name: rmeta.APIServerNamespace(installation.Spec.Variant)}, l)
if err != nil && !errors.IsNotFound(err) {
return err
} else if errors.IsNotFound(err) {
log.Info("API server Namespace does not exist, removing finalizer", "finalizer", render.APIServerFinalizer)
utils.RemoveInstallationFinalizer(installation, render.APIServerFinalizer)
} else {
log.Info("API server Namespace is still present, waiting for termination")
}
}
// Update the installation with any finalizer changes.
return c.Patch(ctx, installation, patchFrom)
} | func (s *GenericAPIServer) InstallAPIGroups(apiGroupInfos ...*APIGroupInfo) error {
for _, apiGroupInfo := range apiGroupInfos {
// Do not register empty group or empty version. Doing so claims /apis/ for the wrong entity to be returned.
// Catching these here places the error much closer to its origin
if len(apiGroupInfo.PrioritizedVersions[0].Group) == 0 {
return fmt.Errorf("cannot register handler with an empty group for %#v", *apiGroupInfo)
}
if len(apiGroupInfo.PrioritizedVersions[0].Version) == 0 {
return fmt.Errorf("cannot register handler with an empty version for %#v", *apiGroupInfo)
}
}
openAPIModels, err := s.getOpenAPIModels(APIGroupPrefix, apiGroupInfos...)
if err != nil {
return fmt.Errorf("unable to get openapi models: %v", err)
}
for _, apiGroupInfo := range apiGroupInfos {
if err := s.installAPIResources(APIGroupPrefix, apiGroupInfo, openAPIModels); err != nil {
return fmt.Errorf("unable to install api resources: %v", err)
}
// setup discovery
// Install the version handler.
// Add a handler at /apis/<groupName> to enumerate all versions supported by this group.
apiVersionsForDiscovery := []metav1.GroupVersionForDiscovery{}
for _, groupVersion := range apiGroupInfo.PrioritizedVersions {
// Check the config to make sure that we elide versions that don't have any resources
if len(apiGroupInfo.VersionedResourcesStorageMap[groupVersion.Version]) == 0 {
continue
}
apiVersionsForDiscovery = append(apiVersionsForDiscovery, metav1.GroupVersionForDiscovery{
GroupVersion: groupVersion.String(),
Version: groupVersion.Version,
})
}
preferredVersionForDiscovery := metav1.GroupVersionForDiscovery{
GroupVersion: apiGroupInfo.PrioritizedVersions[0].String(),
Version: apiGroupInfo.PrioritizedVersions[0].Version,
}
apiGroup := metav1.APIGroup{
Name: apiGroupInfo.PrioritizedVersions[0].Group,
Versions: apiVersionsForDiscovery,
PreferredVersion: preferredVersionForDiscovery,
}
s.DiscoveryGroupManager.AddGroup(apiGroup)
s.Handler.GoRestfulContainer.Add(discovery.NewAPIGroupHandler(s.Serializer, apiGroup).WebService())
}
return nil
} | 0.494127 | tigera/operator | pkg/controller/apiserver/apiserver_controller.go | config-syncer/config-syncer | vendor/k8s.io/apiserver/pkg/server/genericapiserver.go | Apache-2.0 | go |
maintainInstallationFinalizer manages this controller's finalizer on the Installation resource.
We add a finalizer to the Installation when the API server has been installed, and only remove that finalizer when
the API server has been deleted and its pods have stopped running. This allows for a graceful cleanup of API server resources
prior to the CNI plugin being removed. | func maintainInstallationFinalizer(ctx context.Context, c client.Client, apiserver *operatorv1.APIServer) error {
// Get the Installation.
installation := &operatorv1.Installation{}
if err := c.Get(ctx, utils.DefaultInstanceKey, installation); err != nil {
if errors.IsNotFound(err) {
log.V(1).Info("Installation config not found")
return nil
}
log.Error(err, "An error occurred when querying the Installation resource")
return err
}
patchFrom := client.MergeFrom(installation.DeepCopy())
// Determine the correct finalizers to apply to the Installation. If the APIServer exists, we should apply
// a finalizer. Otherwise, if the API server namespace doesn't exist we should remove it. This ensures the finalizer
// is always present so long as the resources managed by this controller exist in the cluster.
if apiserver != nil {
// Add a finalizer indicating that the API server is still running.
utils.SetInstallationFinalizer(installation, render.APIServerFinalizer)
} else {
// Check if the API server namespace exists, and remove the finalizer if not. Gating this on Namespace removal
// is the best way to approximate that all API server related resources have been removed.
l := &corev1.Namespace{}
err := c.Get(ctx, types.NamespacedName{Name: rmeta.APIServerNamespace(installation.Spec.Variant)}, l)
if err != nil && !errors.IsNotFound(err) {
return err
} else if errors.IsNotFound(err) {
log.Info("API server Namespace does not exist, removing finalizer", "finalizer", render.APIServerFinalizer)
utils.RemoveInstallationFinalizer(installation, render.APIServerFinalizer)
} else {
log.Info("API server Namespace is still present, waiting for termination")
}
}
// Update the installation with any finalizer changes.
return c.Patch(ctx, installation, patchFrom)
} | func (s *GenericAPIServer) InstallAPIGroup(apiGroupInfo *APIGroupInfo) error {
return s.InstallAPIGroups(apiGroupInfo)
} | 0.484969 | tigera/operator | pkg/controller/apiserver/apiserver_controller.go | config-syncer/config-syncer | vendor/k8s.io/apiserver/pkg/server/genericapiserver.go | Apache-2.0 | go |
writeCurrentContext takes three possible paths.
If newCurrentContext is the same as the startingConfig's current context, then we exit.
If newCurrentContext has a value, then that value is written into the default destination file.
If newCurrentContext is empty, then we find the config file that is setting the CurrentContext and clear the value from that file | func writeCurrentContext(configAccess ConfigAccess, newCurrentContext string) error {
if startingConfig, err := configAccess.GetStartingConfig(); err != nil {
return err
} else if startingConfig.CurrentContext == newCurrentContext {
return nil
}
if configAccess.IsExplicitFile() {
file := configAccess.GetExplicitFile()
currConfig, err := getConfigFromFile(file)
if err != nil {
return err
}
currConfig.CurrentContext = newCurrentContext
if err := WriteToFile(*currConfig, file); err != nil {
return err
}
return nil
}
if len(newCurrentContext) > 0 {
destinationFile := configAccess.GetDefaultFilename()
config, err := getConfigFromFile(destinationFile)
if err != nil {
return err
}
config.CurrentContext = newCurrentContext
if err := WriteToFile(*config, destinationFile); err != nil {
return err
}
return nil
}
// we're supposed to be clearing the current context. We need to find the first spot in the chain that is setting it and clear it
for _, file := range configAccess.GetLoadingPrecedence() {
if _, err := os.Stat(file); err == nil {
currConfig, err := getConfigFromFile(file)
if err != nil {
return err
}
if len(currConfig.CurrentContext) > 0 {
currConfig.CurrentContext = newCurrentContext
if err := WriteToFile(*currConfig, file); err != nil {
return err
}
return nil
}
}
}
return errors.New("no config found to write context")
} | func ModifyConfig(configAccess ConfigAccess, newConfig clientcmdapi.Config, relativizePaths bool) error {
if UseModifyConfigLock {
possibleSources := configAccess.GetLoadingPrecedence()
// sort the possible kubeconfig files so we always "lock" in the same order
// to avoid deadlock (note: this can fail w/ symlinks, but... come on).
sort.Strings(possibleSources)
for _, filename := range possibleSources {
if err := lockFile(filename); err != nil {
return err
}
defer unlockFile(filename)
}
}
startingConfig, err := configAccess.GetStartingConfig()
if err != nil {
return err
}
// We need to find all differences, locate their original files, read a partial config to modify only that stanza and write out the file.
// Special case the test for current context and preferences since those always write to the default file.
if reflect.DeepEqual(*startingConfig, newConfig) {
// nothing to do
return nil
}
if startingConfig.CurrentContext != newConfig.CurrentContext {
if err := writeCurrentContext(configAccess, newConfig.CurrentContext); err != nil {
return err
}
}
if !reflect.DeepEqual(startingConfig.Preferences, newConfig.Preferences) {
if err := writePreferences(configAccess, newConfig.Preferences); err != nil {
return err
}
}
// Search every cluster, authInfo, and context. First from new to old for differences, then from old to new for deletions
for key, cluster := range newConfig.Clusters {
startingCluster, exists := startingConfig.Clusters[key]
if !reflect.DeepEqual(cluster, startingCluster) || !exists {
destinationFile := cluster.LocationOfOrigin
if len(destinationFile) == 0 {
destinationFile = configAccess.GetDefaultFilename()
}
configToWrite, err := getConfigFromFile(destinationFile)
if err != nil {
return err
}
t := *cluster
configToWrite.Clusters[key] = &t
configToWrite.Clusters[key].LocationOfOrigin = destinationFile
if relativizePaths {
if err := RelativizeClusterLocalPaths(configToWrite.Clusters[key]); err != nil {
return err
}
}
if err := WriteToFile(*configToWrite, destinationFile); err != nil {
return err
}
}
}
// seenConfigs stores a map of config source filenames to computed config objects
seenConfigs := map[string]*clientcmdapi.Config{}
for key, context := range newConfig.Contexts {
startingContext, exists := startingConfig.Contexts[key]
if !reflect.DeepEqual(context, startingContext) || !exists {
destinationFile := context.LocationOfOrigin
if len(destinationFile) == 0 {
destinationFile = configAccess.GetDefaultFilename()
}
// we only obtain a fresh config object from its source file
// if we have not seen it already - this prevents us from
// reading and writing to the same number of files repeatedly
// when multiple / all contexts share the same destination file.
configToWrite, seen := seenConfigs[destinationFile]
if !seen {
var err error
configToWrite, err = getConfigFromFile(destinationFile)
if err != nil {
return err
}
seenConfigs[destinationFile] = configToWrite
}
configToWrite.Contexts[key] = context
}
}
// actually persist config object changes
for destinationFile, configToWrite := range seenConfigs {
if err := WriteToFile(*configToWrite, destinationFile); err != nil {
return err
}
}
for key, authInfo := range newConfig.AuthInfos {
startingAuthInfo, exists := startingConfig.AuthInfos[key]
if !reflect.DeepEqual(authInfo, startingAuthInfo) || !exists {
destinationFile := authInfo.LocationOfOrigin
if len(destinationFile) == 0 {
destinationFile = configAccess.GetDefaultFilename()
}
configToWrite, err := getConfigFromFile(destinationFile)
if err != nil {
return err
}
t := *authInfo
configToWrite.AuthInfos[key] = &t
configToWrite.AuthInfos[key].LocationOfOrigin = destinationFile
if relativizePaths {
if err := RelativizeAuthInfoLocalPaths(configToWrite.AuthInfos[key]); err != nil {
return err
}
}
if err := WriteToFile(*configToWrite, destinationFile); err != nil {
return err
}
}
}
for key, cluster := range startingConfig.Clusters {
if _, exists := newConfig.Clusters[key]; !exists {
destinationFile := cluster.LocationOfOrigin
if len(destinationFile) == 0 {
destinationFile = configAccess.GetDefaultFilename()
}
configToWrite, err := getConfigFromFile(destinationFile)
if err != nil {
return err
}
delete(configToWrite.Clusters, key)
if err := WriteToFile(*configToWrite, destinationFile); err != nil {
return err
}
}
}
for key, context := range startingConfig.Contexts {
if _, exists := newConfig.Contexts[key]; !exists {
destinationFile := context.LocationOfOrigin
if len(destinationFile) == 0 {
destinationFile = configAccess.GetDefaultFilename()
}
configToWrite, err := getConfigFromFile(destinationFile)
if err != nil {
return err
}
delete(configToWrite.Contexts, key)
if err := WriteToFile(*configToWrite, destinationFile); err != nil {
return err
}
}
}
for key, authInfo := range startingConfig.AuthInfos {
if _, exists := newConfig.AuthInfos[key]; !exists {
destinationFile := authInfo.LocationOfOrigin
if len(destinationFile) == 0 {
destinationFile = configAccess.GetDefaultFilename()
}
configToWrite, err := getConfigFromFile(destinationFile)
if err != nil {
return err
}
delete(configToWrite.AuthInfos, key)
if err := WriteToFile(*configToWrite, destinationFile); err != nil {
return err
}
}
}
return nil
} | 0.601493 | k8snetworkplumbingwg/multus-cni | vendor/k8s.io/client-go/tools/clientcmd/config.go | k8snetworkplumbingwg/multus-cni | vendor/k8s.io/client-go/tools/clientcmd/config.go | Apache-2.0 | go |
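writeCurrentContext is unexported; callers reach it through clientcmd.ModifyConfig (the second function in this row), which diffs a desired config against the starting config and writes only what changed. The snippet below is an illustrative sketch, added for clarity and not part of the dataset, of switching the kubeconfig current context through that path; the context name "my-other-context" is hypothetical and assumed to already exist in the merged kubeconfig.

```go
package main

import (
	"fmt"
	"log"

	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// PathOptions implements ConfigAccess and resolves the kubeconfig chain
	// (an explicit file, the KUBECONFIG env var, or the default ~/.kube/config).
	pathOpts := clientcmd.NewDefaultPathOptions()

	cfg, err := pathOpts.GetStartingConfig()
	if err != nil {
		log.Fatalf("load kubeconfig: %v", err)
	}

	// Change only the current context; ModifyConfig detects the difference and
	// calls writeCurrentContext to persist it to the appropriate file.
	cfg.CurrentContext = "my-other-context" // hypothetical context name
	if err := clientcmd.ModifyConfig(pathOpts, *cfg, true); err != nil {
		log.Fatalf("write kubeconfig: %v", err)
	}
	fmt.Println("current-context set to", cfg.CurrentContext)
}
```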
writeCurrentContext takes three possible paths.
If newCurrentContext is the same as the startingConfig's current context, then we exit.
If newCurrentContext has a value, then that value is written into the default destination file.
If newCurrentContext is empty, then we find the config file that is setting the CurrentContext and clear the value from that file | func writeCurrentContext(configAccess ConfigAccess, newCurrentContext string) error {
if startingConfig, err := configAccess.GetStartingConfig(); err != nil {
return err
} else if startingConfig.CurrentContext == newCurrentContext {
return nil
}
if configAccess.IsExplicitFile() {
file := configAccess.GetExplicitFile()
currConfig, err := getConfigFromFile(file)
if err != nil {
return err
}
currConfig.CurrentContext = newCurrentContext
if err := WriteToFile(*currConfig, file); err != nil {
return err
}
return nil
}
if len(newCurrentContext) > 0 {
destinationFile := configAccess.GetDefaultFilename()
config, err := getConfigFromFile(destinationFile)
if err != nil {
return err
}
config.CurrentContext = newCurrentContext
if err := WriteToFile(*config, destinationFile); err != nil {
return err
}
return nil
}
// we're supposed to be clearing the current context. We need to find the first spot in the chain that is setting it and clear it
for _, file := range configAccess.GetLoadingPrecedence() {
if _, err := os.Stat(file); err == nil {
currConfig, err := getConfigFromFile(file)
if err != nil {
return err
}
if len(currConfig.CurrentContext) > 0 {
currConfig.CurrentContext = newCurrentContext
if err := WriteToFile(*currConfig, file); err != nil {
return err
}
return nil
}
}
}
return errors.New("no config found to write context")
} | func resolveContextName(opts *cliflags.ClientOptions, cfg *configfile.ConfigFile) string {
if opts != nil && opts.Context != "" {
return opts.Context
}
if opts != nil && len(opts.Hosts) > 0 {
return DefaultContextName
}
if os.Getenv(client.EnvOverrideHost) != "" {
return DefaultContextName
}
if ctxName := os.Getenv(EnvOverrideContext); ctxName != "" {
return ctxName
}
if cfg != nil && cfg.CurrentContext != "" {
// We don't validate if this context exists: errors may occur when trying to use it.
return cfg.CurrentContext
}
return DefaultContextName
} | 0.527779 | k8snetworkplumbingwg/multus-cni | vendor/k8s.io/client-go/tools/clientcmd/config.go | docker/cli | cli/command/cli.go | Apache-2.0 | go |
writeCurrentContext takes three possible paths.
If newCurrentContext is the same as the startingConfig's current context, then we exit.
If newCurrentContext has a value, then that value is written into the default destination file.
If newCurrentContext is empty, then we find the config file that is setting the CurrentContext and clear the value from that file | func writeCurrentContext(configAccess ConfigAccess, newCurrentContext string) error {
if startingConfig, err := configAccess.GetStartingConfig(); err != nil {
return err
} else if startingConfig.CurrentContext == newCurrentContext {
return nil
}
if configAccess.IsExplicitFile() {
file := configAccess.GetExplicitFile()
currConfig, err := getConfigFromFile(file)
if err != nil {
return err
}
currConfig.CurrentContext = newCurrentContext
if err := WriteToFile(*currConfig, file); err != nil {
return err
}
return nil
}
if len(newCurrentContext) > 0 {
destinationFile := configAccess.GetDefaultFilename()
config, err := getConfigFromFile(destinationFile)
if err != nil {
return err
}
config.CurrentContext = newCurrentContext
if err := WriteToFile(*config, destinationFile); err != nil {
return err
}
return nil
}
// we're supposed to be clearing the current context. We need to find the first spot in the chain that is setting it and clear it
for _, file := range configAccess.GetLoadingPrecedence() {
if _, err := os.Stat(file); err == nil {
currConfig, err := getConfigFromFile(file)
if err != nil {
return err
}
if len(currConfig.CurrentContext) > 0 {
currConfig.CurrentContext = newCurrentContext
if err := WriteToFile(*currConfig, file); err != nil {
return err
}
return nil
}
}
}
return errors.New("no config found to write context")
} | func AddConfig(ctx context.Context) (context.Context, *ConfigInfo) {
c := GetConfig(ctx)
cCopy := new(ConfigInfo)
*cCopy = *c
newCtx := context.WithValue(ctx, configContextKey, cCopy)
return newCtx, cCopy
} | 0.514819 | k8snetworkplumbingwg/multus-cni | vendor/k8s.io/client-go/tools/clientcmd/config.go | rclone/rclone | fs/config.go | MIT | go |
writeCurrentContext takes three possible paths.
If newCurrentContext is the same as the startingConfig's current context, then we exit.
If newCurrentContext has a value, then that value is written into the default destination file.
If newCurrentContext is empty, then we find the config file that is setting the CurrentContext and clear the value from that file | func writeCurrentContext(configAccess ConfigAccess, newCurrentContext string) error {
if startingConfig, err := configAccess.GetStartingConfig(); err != nil {
return err
} else if startingConfig.CurrentContext == newCurrentContext {
return nil
}
if configAccess.IsExplicitFile() {
file := configAccess.GetExplicitFile()
currConfig, err := getConfigFromFile(file)
if err != nil {
return err
}
currConfig.CurrentContext = newCurrentContext
if err := WriteToFile(*currConfig, file); err != nil {
return err
}
return nil
}
if len(newCurrentContext) > 0 {
destinationFile := configAccess.GetDefaultFilename()
config, err := getConfigFromFile(destinationFile)
if err != nil {
return err
}
config.CurrentContext = newCurrentContext
if err := WriteToFile(*config, destinationFile); err != nil {
return err
}
return nil
}
// we're supposed to be clearing the current context. We need to find the first spot in the chain that is setting it and clear it
for _, file := range configAccess.GetLoadingPrecedence() {
if _, err := os.Stat(file); err == nil {
currConfig, err := getConfigFromFile(file)
if err != nil {
return err
}
if len(currConfig.CurrentContext) > 0 {
currConfig.CurrentContext = newCurrentContext
if err := WriteToFile(*currConfig, file); err != nil {
return err
}
return nil
}
}
}
return errors.New("no config found to write context")
} | func (cli *DockerCli) CurrentContext() string {
return cli.currentContext
} | 0.506051 | k8snetworkplumbingwg/multus-cni | vendor/k8s.io/client-go/tools/clientcmd/config.go | docker/cli | cli/command/cli.go | Apache-2.0 | go |
writeCurrentContext takes three possible paths.
If newCurrentContext is the same as the startingConfig's current context, then we exit.
If newCurrentContext has a value, then that value is written into the default destination file.
If newCurrentContext is empty, then we find the config file that is setting the CurrentContext and clear the value from that file | func writeCurrentContext(configAccess ConfigAccess, newCurrentContext string) error {
if startingConfig, err := configAccess.GetStartingConfig(); err != nil {
return err
} else if startingConfig.CurrentContext == newCurrentContext {
return nil
}
if configAccess.IsExplicitFile() {
file := configAccess.GetExplicitFile()
currConfig, err := getConfigFromFile(file)
if err != nil {
return err
}
currConfig.CurrentContext = newCurrentContext
if err := WriteToFile(*currConfig, file); err != nil {
return err
}
return nil
}
if len(newCurrentContext) > 0 {
destinationFile := configAccess.GetDefaultFilename()
config, err := getConfigFromFile(destinationFile)
if err != nil {
return err
}
config.CurrentContext = newCurrentContext
if err := WriteToFile(*config, destinationFile); err != nil {
return err
}
return nil
}
// we're supposed to be clearing the current context. We need to find the first spot in the chain that is setting it and clear it
for _, file := range configAccess.GetLoadingPrecedence() {
if _, err := os.Stat(file); err == nil {
currConfig, err := getConfigFromFile(file)
if err != nil {
return err
}
if len(currConfig.CurrentContext) > 0 {
currConfig.CurrentContext = newCurrentContext
if err := WriteToFile(*currConfig, file); err != nil {
return err
}
return nil
}
}
}
return errors.New("no config found to write context")
} | func resolveContextName(opts *cliflags.CommonOptions, config *configfile.ConfigFile, contextstore store.Reader) (string, error) {
if opts.Context != "" && len(opts.Hosts) > 0 {
return "", errors.New("Conflicting options: either specify --host or --context, not both")
}
if opts.Context != "" {
return opts.Context, nil
}
if len(opts.Hosts) > 0 {
return DefaultContextName, nil
}
if _, present := os.LookupEnv("DOCKER_HOST"); present {
return DefaultContextName, nil
}
if ctxName, ok := os.LookupEnv("DOCKER_CONTEXT"); ok {
return ctxName, nil
}
if config != nil && config.CurrentContext != "" {
_, err := contextstore.GetMetadata(config.CurrentContext)
if store.IsErrContextDoesNotExist(err) {
return "", errors.Errorf("Current context %q is not found on the file system, please check your config file at %s", config.CurrentContext, config.Filename)
}
return config.CurrentContext, err
}
return DefaultContextName, nil
} | 0.487746 | k8snetworkplumbingwg/multus-cni | vendor/k8s.io/client-go/tools/clientcmd/config.go | docker/app | vendor/github.com/docker/cli/cli/command/cli.go | Apache-2.0 | go |