column              dtype           range / cardinality
query_docstring     stringlengths   24 to 20.8k
positive_code       stringlengths   17 to 325k
hard_negative_code  stringlengths   17 to 325k
similarity_score    float64         0.3 to 1
query_repo          stringclasses   407 values
query_path          stringlengths   5 to 170
hn_repo             stringclasses   400 values
hn_path             stringlengths   5 to 170
hn_license          stringclasses   4 values
language            stringclasses   1 value
C documentation /* ** Compare two search points. Return negative, zero, or positive if the first ** is less than, equal to, or greater than the second. ** ** The rScore is the primary key. Smaller rScore values come first. ** If the rScore is a tie, then use iLevel as the tie breaker with smaller ** iLevel values coming first. In this way, if rScore is the same for all ** SearchPoints, then iLevel becomes the deciding factor and the result ** is a depth-first search, which is the desired default behavior. */
func _rtreeSearchPointCompare(tls *libc.TLS, pA uintptr, pB uintptr) (r int32) { if (*TRtreeSearchPoint)(unsafe.Pointer(pA)).FrScore < (*TRtreeSearchPoint)(unsafe.Pointer(pB)).FrScore { return -int32(1) } if (*TRtreeSearchPoint)(unsafe.Pointer(pA)).FrScore > (*TRtreeSearchPoint)(unsafe.Pointer(pB)).FrScore { return +libc.Int32FromInt32(1) } if libc.Int32FromUint8((*TRtreeSearchPoint)(unsafe.Pointer(pA)).FiLevel) < libc.Int32FromUint8((*TRtreeSearchPoint)(unsafe.Pointer(pB)).FiLevel) { return -int32(1) } if libc.Int32FromUint8((*TRtreeSearchPoint)(unsafe.Pointer(pA)).FiLevel) > libc.Int32FromUint8((*TRtreeSearchPoint)(unsafe.Pointer(pB)).FiLevel) { return +libc.Int32FromInt32(1) } return 0 }
func _rtreeSearchPointFirst(tls *libc.TLS, pCur uintptr) (r uintptr) { var v1, v2 uintptr _, _ = v1, v2 if (*TRtreeCursor)(unsafe.Pointer(pCur)).FbPoint != 0 { v1 = pCur + 64 } else { if (*TRtreeCursor)(unsafe.Pointer(pCur)).FnPoint != 0 { v2 = (*TRtreeCursor)(unsafe.Pointer(pCur)).FaPoint } else { v2 = uintptr(0) } v1 = v2 } return v1 }
0.792895
umputun/spot
vendor/modernc.org/sqlite/lib/sqlite_freebsd_amd64.go
umputun/spot
vendor/modernc.org/sqlite/lib/sqlite_freebsd_amd64.go
MIT
go
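The comparator in this row is a mechanical C-to-Go translation of the SQLite r-tree code. The ordering it encodes (rScore ascending as primary key, iLevel ascending as tie-breaker) can be illustrated on a plain Go struct; this is an illustrative sketch only, and the `searchPoint` type and field names are invented for the example, not taken from the dataset row.

```go
package main

import (
	"fmt"
	"sort"
)

// searchPoint mirrors the two fields the comparator inspects:
// rScore is the primary key, iLevel breaks ties (smaller first).
type searchPoint struct {
	rScore float64
	iLevel uint8
}

// compare returns negative, zero, or positive, matching the ordering
// described in the docstring above.
func compare(a, b searchPoint) int {
	switch {
	case a.rScore < b.rScore:
		return -1
	case a.rScore > b.rScore:
		return +1
	case a.iLevel < b.iLevel:
		return -1
	case a.iLevel > b.iLevel:
		return +1
	}
	return 0
}

func main() {
	pts := []searchPoint{{1.0, 2}, {0.5, 3}, {1.0, 1}}
	sort.Slice(pts, func(i, j int) bool { return compare(pts[i], pts[j]) < 0 })
	fmt.Println(pts) // [{0.5 3} {1 1} {1 2}]
}
```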
C documentation /* ** Compare two search points. Return negative, zero, or positive if the first ** is less than, equal to, or greater than the second. ** ** The rScore is the primary key. Smaller rScore values come first. ** If the rScore is a tie, then use iLevel as the tie breaker with smaller ** iLevel values coming first. In this way, if rScore is the same for all ** SearchPoints, then iLevel becomes the deciding factor and the result ** is a depth-first search, which is the desired default behavior. */
func _rtreeSearchPointCompare(tls *libc.TLS, pA uintptr, pB uintptr) (r int32) { if (*TRtreeSearchPoint)(unsafe.Pointer(pA)).FrScore < (*TRtreeSearchPoint)(unsafe.Pointer(pB)).FrScore { return -int32(1) } if (*TRtreeSearchPoint)(unsafe.Pointer(pA)).FrScore > (*TRtreeSearchPoint)(unsafe.Pointer(pB)).FrScore { return +libc.Int32FromInt32(1) } if libc.Int32FromUint8((*TRtreeSearchPoint)(unsafe.Pointer(pA)).FiLevel) < libc.Int32FromUint8((*TRtreeSearchPoint)(unsafe.Pointer(pB)).FiLevel) { return -int32(1) } if libc.Int32FromUint8((*TRtreeSearchPoint)(unsafe.Pointer(pA)).FiLevel) > libc.Int32FromUint8((*TRtreeSearchPoint)(unsafe.Pointer(pB)).FiLevel) { return +libc.Int32FromInt32(1) } return 0 }
func _rtreeSearchPointPop(tls *libc.TLS, p uintptr) { var i, j, k, n, v1, v3 int32 var v2 uintptr _, _, _, _, _, _, _ = i, j, k, n, v1, v2, v3 i = int32(1) - libc.Int32FromUint8((*TRtreeCursor)(unsafe.Pointer(p)).FbPoint) if *(*uintptr)(unsafe.Pointer(p + 88 + uintptr(i)*8)) != 0 { _nodeRelease(tls, (*TRtreeCursor)(unsafe.Pointer(p)).Fbase.FpVtab, *(*uintptr)(unsafe.Pointer(p + 88 + uintptr(i)*8))) *(*uintptr)(unsafe.Pointer(p + 88 + uintptr(i)*8)) = uintptr(0) } if (*TRtreeCursor)(unsafe.Pointer(p)).FbPoint != 0 { *(*Tu32)(unsafe.Pointer(p + 128 + uintptr((*TRtreeCursor)(unsafe.Pointer(p)).FsPoint.FiLevel)*4))-- (*TRtreeCursor)(unsafe.Pointer(p)).FbPoint = uint8(0) } else { if (*TRtreeCursor)(unsafe.Pointer(p)).FnPoint != 0 { *(*Tu32)(unsafe.Pointer(p + 128 + uintptr((*(*TRtreeSearchPoint)(unsafe.Pointer((*TRtreeCursor)(unsafe.Pointer(p)).FaPoint))).FiLevel)*4))-- v2 = p + 36 *(*int32)(unsafe.Pointer(v2))-- v1 = *(*int32)(unsafe.Pointer(v2)) n = v1 *(*TRtreeSearchPoint)(unsafe.Pointer((*TRtreeCursor)(unsafe.Pointer(p)).FaPoint)) = *(*TRtreeSearchPoint)(unsafe.Pointer((*TRtreeCursor)(unsafe.Pointer(p)).FaPoint + uintptr(n)*24)) if n < libc.Int32FromInt32(RTREE_CACHE_SZ)-libc.Int32FromInt32(1) { *(*uintptr)(unsafe.Pointer(p + 88 + 1*8)) = *(*uintptr)(unsafe.Pointer(p + 88 + uintptr(n+int32(1))*8)) *(*uintptr)(unsafe.Pointer(p + 88 + uintptr(n+int32(1))*8)) = uintptr(0) } i = 0 for { v3 = i*libc.Int32FromInt32(2) + libc.Int32FromInt32(1) j = v3 if !(v3 < n) { break } k = j + int32(1) if k < n && _rtreeSearchPointCompare(tls, (*TRtreeCursor)(unsafe.Pointer(p)).FaPoint+uintptr(k)*24, (*TRtreeCursor)(unsafe.Pointer(p)).FaPoint+uintptr(j)*24) < 0 { if _rtreeSearchPointCompare(tls, (*TRtreeCursor)(unsafe.Pointer(p)).FaPoint+uintptr(k)*24, (*TRtreeCursor)(unsafe.Pointer(p)).FaPoint+uintptr(i)*24) < 0 { _rtreeSearchPointSwap(tls, p, i, k) i = k } else { break } } else { if _rtreeSearchPointCompare(tls, (*TRtreeCursor)(unsafe.Pointer(p)).FaPoint+uintptr(j)*24, (*TRtreeCursor)(unsafe.Pointer(p)).FaPoint+uintptr(i)*24) < 0 { _rtreeSearchPointSwap(tls, p, i, j) i = j } else { break } } } } } }
0.728126
umputun/spot
vendor/modernc.org/sqlite/lib/sqlite_freebsd_amd64.go
umputun/spot
vendor/modernc.org/sqlite/lib/sqlite_freebsd_amd64.go
MIT
go
C documentation /* ** Compare two search points. Return negative, zero, or positive if the first ** is less than, equal to, or greater than the second. ** ** The rScore is the primary key. Smaller rScore values come first. ** If the rScore is a tie, then use iLevel as the tie breaker with smaller ** iLevel values coming first. In this way, if rScore is the same for all ** SearchPoints, then iLevel becomes the deciding factor and the result ** is a depth-first search, which is the desired default behavior. */
func _rtreeSearchPointCompare(tls *libc.TLS, pA uintptr, pB uintptr) (r int32) { if (*TRtreeSearchPoint)(unsafe.Pointer(pA)).FrScore < (*TRtreeSearchPoint)(unsafe.Pointer(pB)).FrScore { return -int32(1) } if (*TRtreeSearchPoint)(unsafe.Pointer(pA)).FrScore > (*TRtreeSearchPoint)(unsafe.Pointer(pB)).FrScore { return +libc.Int32FromInt32(1) } if libc.Int32FromUint8((*TRtreeSearchPoint)(unsafe.Pointer(pA)).FiLevel) < libc.Int32FromUint8((*TRtreeSearchPoint)(unsafe.Pointer(pB)).FiLevel) { return -int32(1) } if libc.Int32FromUint8((*TRtreeSearchPoint)(unsafe.Pointer(pA)).FiLevel) > libc.Int32FromUint8((*TRtreeSearchPoint)(unsafe.Pointer(pB)).FiLevel) { return +libc.Int32FromInt32(1) } return 0 }
func _rtreeNodeOfFirstSearchPoint(tls *libc.TLS, pCur uintptr, pRC uintptr) (r uintptr) { var id Tsqlite3_int64 var ii int32 var v1 int64 _, _, _ = id, ii, v1 ii = int32(1) - libc.Int32FromUint8((*TRtreeCursor)(unsafe.Pointer(pCur)).FbPoint) if *(*uintptr)(unsafe.Pointer(pCur + 88 + uintptr(ii)*8)) == uintptr(0) { if ii != 0 { v1 = (*(*TRtreeSearchPoint)(unsafe.Pointer((*TRtreeCursor)(unsafe.Pointer(pCur)).FaPoint))).Fid } else { v1 = (*TRtreeCursor)(unsafe.Pointer(pCur)).FsPoint.Fid } id = v1 *(*int32)(unsafe.Pointer(pRC)) = _nodeAcquire(tls, (*TRtreeCursor)(unsafe.Pointer(pCur)).Fbase.FpVtab, id, uintptr(0), pCur+88+uintptr(ii)*8) } return *(*uintptr)(unsafe.Pointer(pCur + 88 + uintptr(ii)*8)) }
0.684883
umputun/spot
vendor/modernc.org/sqlite/lib/sqlite_freebsd_amd64.go
umputun/spot
vendor/modernc.org/sqlite/lib/sqlite_freebsd_amd64.go
MIT
go
C documentation /* ** Compare two search points. Return negative, zero, or positive if the first ** is less than, equal to, or greater than the second. ** ** The rScore is the primary key. Smaller rScore values come first. ** If the rScore is a tie, then use iLevel as the tie breaker with smaller ** iLevel values coming first. In this way, if rScore is the same for all ** SearchPoints, then iLevel becomes the deciding factor and the result ** is a depth-first search, which is the desired default behavior. */
func _rtreeSearchPointCompare(tls *libc.TLS, pA uintptr, pB uintptr) (r int32) { if (*TRtreeSearchPoint)(unsafe.Pointer(pA)).FrScore < (*TRtreeSearchPoint)(unsafe.Pointer(pB)).FrScore { return -int32(1) } if (*TRtreeSearchPoint)(unsafe.Pointer(pA)).FrScore > (*TRtreeSearchPoint)(unsafe.Pointer(pB)).FrScore { return +libc.Int32FromInt32(1) } if libc.Int32FromUint8((*TRtreeSearchPoint)(unsafe.Pointer(pA)).FiLevel) < libc.Int32FromUint8((*TRtreeSearchPoint)(unsafe.Pointer(pB)).FiLevel) { return -int32(1) } if libc.Int32FromUint8((*TRtreeSearchPoint)(unsafe.Pointer(pA)).FiLevel) > libc.Int32FromUint8((*TRtreeSearchPoint)(unsafe.Pointer(pB)).FiLevel) { return +libc.Int32FromInt32(1) } return 0 }
func _rtreeSearchPointSwap(tls *libc.TLS, p uintptr, i int32, j int32) { var pTemp uintptr var t TRtreeSearchPoint _, _ = pTemp, t t = *(*TRtreeSearchPoint)(unsafe.Pointer((*TRtreeCursor)(unsafe.Pointer(p)).FaPoint + uintptr(i)*24)) *(*TRtreeSearchPoint)(unsafe.Pointer((*TRtreeCursor)(unsafe.Pointer(p)).FaPoint + uintptr(i)*24)) = *(*TRtreeSearchPoint)(unsafe.Pointer((*TRtreeCursor)(unsafe.Pointer(p)).FaPoint + uintptr(j)*24)) *(*TRtreeSearchPoint)(unsafe.Pointer((*TRtreeCursor)(unsafe.Pointer(p)).FaPoint + uintptr(j)*24)) = t i++ j++ if i < int32(RTREE_CACHE_SZ) { if j >= int32(RTREE_CACHE_SZ) { _nodeRelease(tls, (*TRtreeCursor)(unsafe.Pointer(p)).Fbase.FpVtab, *(*uintptr)(unsafe.Pointer(p + 88 + uintptr(i)*8))) *(*uintptr)(unsafe.Pointer(p + 88 + uintptr(i)*8)) = uintptr(0) } else { pTemp = *(*uintptr)(unsafe.Pointer(p + 88 + uintptr(i)*8)) *(*uintptr)(unsafe.Pointer(p + 88 + uintptr(i)*8)) = *(*uintptr)(unsafe.Pointer(p + 88 + uintptr(j)*8)) *(*uintptr)(unsafe.Pointer(p + 88 + uintptr(j)*8)) = pTemp } } }
0.664516
umputun/spot
vendor/modernc.org/sqlite/lib/sqlite_freebsd_amd64.go
umputun/spot
vendor/modernc.org/sqlite/lib/sqlite_freebsd_amd64.go
MIT
go
C documentation /* ** Compare two search points. Return negative, zero, or positive if the first ** is less than, equal to, or greater than the second. ** ** The rScore is the primary key. Smaller rScore values come first. ** If the rScore is a tie, then use iLevel as the tie breaker with smaller ** iLevel values coming first. In this way, if rScore is the same for all ** SearchPoints, then iLevel becomes the deciding factor and the result ** is a depth-first search, which is the desired default behavior. */
func _rtreeSearchPointCompare(tls *libc.TLS, pA uintptr, pB uintptr) (r int32) { if (*TRtreeSearchPoint)(unsafe.Pointer(pA)).FrScore < (*TRtreeSearchPoint)(unsafe.Pointer(pB)).FrScore { return -int32(1) } if (*TRtreeSearchPoint)(unsafe.Pointer(pA)).FrScore > (*TRtreeSearchPoint)(unsafe.Pointer(pB)).FrScore { return +libc.Int32FromInt32(1) } if libc.Int32FromUint8((*TRtreeSearchPoint)(unsafe.Pointer(pA)).FiLevel) < libc.Int32FromUint8((*TRtreeSearchPoint)(unsafe.Pointer(pB)).FiLevel) { return -int32(1) } if libc.Int32FromUint8((*TRtreeSearchPoint)(unsafe.Pointer(pA)).FiLevel) > libc.Int32FromUint8((*TRtreeSearchPoint)(unsafe.Pointer(pB)).FiLevel) { return +libc.Int32FromInt32(1) } return 0 }
func _whereRangeVectorLen(tls *libc.TLS, pParse uintptr, iCur int32, pIdx uintptr, nEq int32, pTerm uintptr) (r int32) { var aff, idxaff int8 var i, nCmp, v1 int32 var pColl, pLhs, pRhs uintptr _, _, _, _, _, _, _, _ = aff, i, idxaff, nCmp, pColl, pLhs, pRhs, v1 nCmp = _sqlite3ExprVectorSize(tls, (*TExpr)(unsafe.Pointer((*TWhereTerm)(unsafe.Pointer(pTerm)).FpExpr)).FpLeft) if nCmp < libc.Int32FromUint16((*TIndex)(unsafe.Pointer(pIdx)).FnColumn)-nEq { v1 = nCmp } else { v1 = libc.Int32FromUint16((*TIndex)(unsafe.Pointer(pIdx)).FnColumn) - nEq } nCmp = v1 i = int32(1) for { if !(i < nCmp) { break } /* Comparison affinity */ idxaff = 0 pLhs = (*(*TExprList_item)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer((*TExpr)(unsafe.Pointer((*TWhereTerm)(unsafe.Pointer(pTerm)).FpExpr)).FpLeft + 32)) + 8 + uintptr(i)*32))).FpExpr pRhs = (*TExpr)(unsafe.Pointer((*TWhereTerm)(unsafe.Pointer(pTerm)).FpExpr)).FpRight if (*TExpr)(unsafe.Pointer(pRhs)).Fflags&uint32(EP_xIsSelect) != uint32(0) { pRhs = (*(*TExprList_item)(unsafe.Pointer((*TSelect)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(pRhs + 32)))).FpEList + 8 + uintptr(i)*32))).FpExpr } else { pRhs = (*(*TExprList_item)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(pRhs + 32)) + 8 + uintptr(i)*32))).FpExpr } /* Check that the LHS of the comparison is a column reference to ** the right column of the right source table. And that the sort ** order of the index column is the same as the sort order of the ** leftmost index column. */ if libc.Int32FromUint8((*TExpr)(unsafe.Pointer(pLhs)).Fop) != int32(TK_COLUMN) || (*TExpr)(unsafe.Pointer(pLhs)).FiTable != iCur || int32((*TExpr)(unsafe.Pointer(pLhs)).FiColumn) != int32(*(*Ti16)(unsafe.Pointer((*TIndex)(unsafe.Pointer(pIdx)).FaiColumn + uintptr(i+nEq)*2))) || libc.Int32FromUint8(*(*Tu8)(unsafe.Pointer((*TIndex)(unsafe.Pointer(pIdx)).FaSortOrder + uintptr(i+nEq)))) != libc.Int32FromUint8(*(*Tu8)(unsafe.Pointer((*TIndex)(unsafe.Pointer(pIdx)).FaSortOrder + uintptr(nEq)))) { break } aff = _sqlite3CompareAffinity(tls, pRhs, _sqlite3ExprAffinity(tls, pLhs)) idxaff = _sqlite3TableColumnAffinity(tls, (*TIndex)(unsafe.Pointer(pIdx)).FpTable, int32((*TExpr)(unsafe.Pointer(pLhs)).FiColumn)) if int32(aff) != int32(idxaff) { break } pColl = _sqlite3BinaryCompareCollSeq(tls, pParse, pLhs, pRhs) if pColl == uintptr(0) { break } if _sqlite3StrICmp(tls, (*TCollSeq)(unsafe.Pointer(pColl)).FzName, *(*uintptr)(unsafe.Pointer((*TIndex)(unsafe.Pointer(pIdx)).FazColl + uintptr(i+nEq)*8))) != 0 { break } goto _2 _2: ; i++ } return i }
0.643678
umputun/spot
vendor/modernc.org/sqlite/lib/sqlite_freebsd_amd64.go
umputun/spot
vendor/modernc.org/sqlite/lib/sqlite_freebsd_amd64.go
MIT
go
Hash is a free data retrieval call binding the contract method 0x09bd5a60. Solidity: function hash() view returns(bytes32)
func (_L1Block *L1BlockCaller) Hash(opts *bind.CallOpts) ([32]byte, error) { var out []interface{} err := _L1Block.contract.Call(opts, &out, "hash") if err != nil { return *new([32]byte), err } out0 := *abi.ConvertType(out[0], new([32]byte)).(*[32]byte) return out0, err }
func (_SystemConfig *SystemConfigCaller) BatcherHash(opts *bind.CallOpts) ([32]byte, error) { var out []interface{} err := _SystemConfig.contract.Call(opts, &out, "batcherHash") if err != nil { return *new([32]byte), err } out0 := *abi.ConvertType(out[0], new([32]byte)).(*[32]byte) return out0, err }
0.754567
ethereum-optimism/optimism
op-e2e/bindings/l1block.go
ethereum-optimism/optimism
op-e2e/bindings/systemconfig.go
MIT
go
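The positive and hard-negative functions in this group are abigen-generated read-only bindings. A minimal sketch of how such a caller is typically used follows; the RPC URL and contract address are placeholders, the `NewL1BlockCaller` constructor is assumed to exist alongside the generated `L1BlockCaller` type (abigen normally emits it), and the bindings import path is inferred from the file path shown in the row.

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/ethereum/go-ethereum/accounts/abi/bind"
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/ethclient"

	bindings "github.com/ethereum-optimism/optimism/op-e2e/bindings"
)

func main() {
	// Placeholder endpoint and address; substitute real values.
	client, err := ethclient.Dial("https://example-rpc.invalid")
	if err != nil {
		log.Fatal(err)
	}
	caller, err := bindings.NewL1BlockCaller(common.HexToAddress("0x0000000000000000000000000000000000000000"), client)
	if err != nil {
		log.Fatal(err)
	}
	// Hash performs an eth_call against the contract's hash() view method.
	hash, err := caller.Hash(&bind.CallOpts{Context: context.Background()})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("hash() returned: %x\n", hash)
}
```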
Hash is a free data retrieval call binding the contract method 0x09bd5a60. Solidity: function hash() view returns(bytes32)
func (_L1Block *L1BlockCaller) Hash(opts *bind.CallOpts) ([32]byte, error) { var out []interface{} err := _L1Block.contract.Call(opts, &out, "hash") if err != nil { return *new([32]byte), err } out0 := *abi.ConvertType(out[0], new([32]byte)).(*[32]byte) return out0, err }
func (_ProtocolVersions *ProtocolVersionsCaller) Recommended(opts *bind.CallOpts) (*big.Int, error) { var out []interface{} err := _ProtocolVersions.contract.Call(opts, &out, "recommended") if err != nil { return *new(*big.Int), err } out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) return out0, err }
0.737518
ethereum-optimism/optimism
op-e2e/bindings/l1block.go
ethereum-optimism/optimism
op-e2e/bindings/protocolversions.go
MIT
go
Hash is a free data retrieval call binding the contract method 0x09bd5a60. Solidity: function hash() view returns(bytes32)
func (_L1Block *L1BlockCaller) Hash(opts *bind.CallOpts) ([32]byte, error) { var out []interface{} err := _L1Block.contract.Call(opts, &out, "hash") if err != nil { return *new([32]byte), err } out0 := *abi.ConvertType(out[0], new([32]byte)).(*[32]byte) return out0, err }
func (_SystemConfig *SystemConfigCaller) VERSION(opts *bind.CallOpts) (*big.Int, error) { var out []interface{} err := _SystemConfig.contract.Call(opts, &out, "VERSION") if err != nil { return *new(*big.Int), err } out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) return out0, err }
0.688686
ethereum-optimism/optimism
op-e2e/bindings/l1block.go
ethereum-optimism/optimism
op-e2e/bindings/systemconfig.go
MIT
go
Hash is a free data retrieval call binding the contract method 0x09bd5a60. Solidity: function hash() view returns(bytes32)
func (_L1Block *L1BlockCaller) Hash(opts *bind.CallOpts) ([32]byte, error) { var out []interface{} err := _L1Block.contract.Call(opts, &out, "hash") if err != nil { return *new([32]byte), err } out0 := *abi.ConvertType(out[0], new([32]byte)).(*[32]byte) return out0, err }
func (_EAS *EASCaller) GetAttestTypeHash(opts *bind.CallOpts) ([32]byte, error) { var out []interface{} err := _EAS.contract.Call(opts, &out, "getAttestTypeHash") if err != nil { return *new([32]byte), err } out0 := *abi.ConvertType(out[0], new([32]byte)).(*[32]byte) return out0, err }
0.678339
ethereum-optimism/optimism
op-e2e/bindings/l1block.go
ethereum-optimism/optimism
op-e2e/bindings/eas.go
MIT
go
Hash is a free data retrieval call binding the contract method 0x09bd5a60. Solidity: function hash() view returns(bytes32)
func (_L1Block *L1BlockCaller) Hash(opts *bind.CallOpts) ([32]byte, error) { var out []interface{} err := _L1Block.contract.Call(opts, &out, "hash") if err != nil { return *new([32]byte), err } out0 := *abi.ConvertType(out[0], new([32]byte)).(*[32]byte) return out0, err }
func (_MultiCall3 *MultiCall3Caller) GetBlockHash(opts *bind.CallOpts, blockNumber *big.Int) ([32]byte, error) { var out []interface{} err := _MultiCall3.contract.Call(opts, &out, "getBlockHash", blockNumber) if err != nil { return *new([32]byte), err } out0 := *abi.ConvertType(out[0], new([32]byte)).(*[32]byte) return out0, err }
0.654293
ethereum-optimism/optimism
op-e2e/bindings/l1block.go
ethereum-optimism/optimism
op-e2e/bindings/multicall3.go
MIT
go
AddFromGatherer works like FromGatherer, but only previously pushed metrics with the same name (and the same job and other grouping labels) will be replaced. (It uses HTTP method 'POST' to push to the Pushgateway.) Deprecated: Please use a Pusher created with New instead.
func AddFromGatherer(job string, grouping map[string]string, url string, g prometheus.Gatherer) error { return push(job, grouping, url, g, "POST") }
func FromGatherer(job string, grouping map[string]string, url string, g prometheus.Gatherer) error { return push(job, grouping, url, g, "PUT") }
0.849171
genuinetools/binctr
vendor/github.com/prometheus/client_golang/prometheus/push/deprecated.go
genuinetools/binctr
vendor/github.com/prometheus/client_golang/prometheus/push/deprecated.go
MIT
go
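The docstring in this row points at the non-deprecated replacement: a Pusher built with push.New. A minimal sketch of that replacement is shown below; the Pushgateway URL, job name, and metric are placeholders chosen for the example.

```go
package main

import (
	"log"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/push"
)

func main() {
	completions := prometheus.NewCounter(prometheus.CounterOpts{
		Name: "backup_completions_total",
		Help: "Total number of completed backups.",
	})
	completions.Inc()

	// Add() uses HTTP POST, so only metrics with the same name and
	// grouping labels are replaced, the same semantics AddFromGatherer had.
	if err := push.New("http://pushgateway.example.invalid:9091", "backup_job").
		Collector(completions).
		Grouping("instance", "host-1").
		Add(); err != nil {
		log.Fatal(err)
	}
}
```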
AddFromGatherer works like FromGatherer, but only previously pushed metrics with the same name (and the same job and other grouping labels) will be replaced. (It uses HTTP method 'POST' to push to the Pushgateway.) Deprecated: Please use a Pusher created with New instead.
func AddFromGatherer(job string, grouping map[string]string, url string, g prometheus.Gatherer) error { return push(job, grouping, url, g, "POST") }
func (p *Pusher) Gatherer(g prometheus.Gatherer) *Pusher { p.gatherers = append(p.gatherers, g) return p }
0.809939
genuinetools/binctr
vendor/github.com/prometheus/client_golang/prometheus/push/deprecated.go
genuinetools/binctr
vendor/github.com/prometheus/client_golang/prometheus/push/push.go
MIT
go
AddFromGatherer works like FromGatherer, but only previously pushed metrics with the same name (and the same job and other grouping labels) will be replaced. (It uses HTTP method 'POST' to push to the Pushgateway.) Deprecated: Please use a Pusher created with New instead.
func AddFromGatherer(job string, grouping map[string]string, url string, g prometheus.Gatherer) error { return push(job, grouping, url, g, "POST") }
func (p *Pusher) Add() error { return p.push("POST") }
0.749028
genuinetools/binctr
vendor/github.com/prometheus/client_golang/prometheus/push/deprecated.go
genuinetools/binctr
vendor/github.com/prometheus/client_golang/prometheus/push/push.go
MIT
go
AddFromGatherer works like FromGatherer, but only previously pushed metrics with the same name (and the same job and other grouping labels) will be replaced. (It uses HTTP method 'POST' to push to the Pushgateway.) Deprecated: Please use a Pusher created with New instead.
func AddFromGatherer(job string, grouping map[string]string, url string, g prometheus.Gatherer) error { return push(job, grouping, url, g, "POST") }
func Collectors(job string, grouping map[string]string, url string, collectors ...prometheus.Collector) error { return pushCollectors(job, grouping, url, "PUT", collectors...) }
0.738645
genuinetools/binctr
vendor/github.com/prometheus/client_golang/prometheus/push/deprecated.go
genuinetools/binctr
vendor/github.com/prometheus/client_golang/prometheus/push/deprecated.go
MIT
go
AddFromGatherer works like FromGatherer, but only previously pushed metrics with the same name (and the same job and other grouping labels) will be replaced. (It uses HTTP method 'POST' to push to the Pushgateway.) Deprecated: Please use a Pusher created with New instead.
func AddFromGatherer(job string, grouping map[string]string, url string, g prometheus.Gatherer) error { return push(job, grouping, url, g, "POST") }
func AddCollectors(job string, grouping map[string]string, url string, collectors ...prometheus.Collector) error { return pushCollectors(job, grouping, url, "POST", collectors...) }
0.6819
genuinetools/binctr
vendor/github.com/prometheus/client_golang/prometheus/push/deprecated.go
genuinetools/binctr
vendor/github.com/prometheus/client_golang/prometheus/push/deprecated.go
MIT
go
StartEngagementRequest generates a "aws/request.Request" representing the client's request for the StartEngagement operation. The "output" return value will be populated with the request's response once the request completes successfully. Use "Send" method on the returned Request to send the API call to the service. the "output" return value is not valid until after Send returns without error. See StartEngagement for more information on using the StartEngagement API call, and error handling. This method is useful when you want to inject custom logic or configuration into the SDK's request lifecycle. Such as custom headers, or retry logic. // Example sending a request using the StartEngagementRequest method. req, resp := client.StartEngagementRequest(params) err := req.Send() if err == nil { // resp is now filled fmt.Println(resp) } See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-contacts-2021-05-03/StartEngagement
func (c *SSMContacts) StartEngagementRequest(input *StartEngagementInput) (req *request.Request, output *StartEngagementOutput) { op := &request.Operation{ Name: opStartEngagement, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { input = &StartEngagementInput{} } output = &StartEngagementOutput{} req = c.newRequest(op, input, output) return }
func (c *SSMContacts) StopEngagementRequest(input *StopEngagementInput) (req *request.Request, output *StopEngagementOutput) { op := &request.Operation{ Name: opStopEngagement, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { input = &StopEngagementInput{} } output = &StopEngagementOutput{} req = c.newRequest(op, input, output) req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return }
0.893233
aws/aws-sdk-go
service/ssmcontacts/api.go
aws/aws-sdk-go
service/ssmcontacts/api.go
Apache-2.0
go
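The docstring's own snippet boils down to the SDK's standard build-request-then-Send pattern. A hedged sketch fleshing it out into a self-contained program follows; the region, ARN, and message fields are placeholder values, and the StartEngagementInput field names are written from memory of the generated SSM Contacts API and should be treated as assumptions.

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ssmcontacts"
)

func main() {
	sess := session.Must(session.NewSession(aws.NewConfig().WithRegion("us-east-1")))
	client := ssmcontacts.New(sess)

	// Build the request without sending it, so custom headers or retry
	// settings can be attached before Send().
	req, resp := client.StartEngagementRequest(&ssmcontacts.StartEngagementInput{
		ContactId: aws.String("arn:aws:ssm-contacts:us-east-1:111122223333:contact/example"), // placeholder
		Sender:    aws.String("on-call-bot"),
		Subject:   aws.String("Disk usage alert"),
		Content:   aws.String("Volume /data is above 90% capacity."),
	})
	if err := req.Send(); err != nil {
		log.Fatal(err)
	}
	fmt.Println(resp) // resp is populated only after Send returns without error
}
```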
StartEngagementRequest generates a "aws/request.Request" representing the client's request for the StartEngagement operation. The "output" return value will be populated with the request's response once the request completes successfully. Use "Send" method on the returned Request to send the API call to the service. the "output" return value is not valid until after Send returns without error. See StartEngagement for more information on using the StartEngagement API call, and error handling. This method is useful when you want to inject custom logic or configuration into the SDK's request lifecycle. Such as custom headers, or retry logic. // Example sending a request using the StartEngagementRequest method. req, resp := client.StartEngagementRequest(params) err := req.Send() if err == nil { // resp is now filled fmt.Println(resp) } See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-contacts-2021-05-03/StartEngagement
func (c *SSMContacts) StartEngagementRequest(input *StartEngagementInput) (req *request.Request, output *StartEngagementOutput) { op := &request.Operation{ Name: opStartEngagement, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { input = &StartEngagementInput{} } output = &StartEngagementOutput{} req = c.newRequest(op, input, output) return }
func (c *SSMContacts) DescribeEngagementRequest(input *DescribeEngagementInput) (req *request.Request, output *DescribeEngagementOutput) { op := &request.Operation{ Name: opDescribeEngagement, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { input = &DescribeEngagementInput{} } output = &DescribeEngagementOutput{} req = c.newRequest(op, input, output) return }
0.881503
aws/aws-sdk-go
service/ssmcontacts/api.go
aws/aws-sdk-go
service/ssmcontacts/api.go
Apache-2.0
go
StartEngagementRequest generates a "aws/request.Request" representing the client's request for the StartEngagement operation. The "output" return value will be populated with the request's response once the request completes successfully. Use "Send" method on the returned Request to send the API call to the service. the "output" return value is not valid until after Send returns without error. See StartEngagement for more information on using the StartEngagement API call, and error handling. This method is useful when you want to inject custom logic or configuration into the SDK's request lifecycle. Such as custom headers, or retry logic. // Example sending a request using the StartEngagementRequest method. req, resp := client.StartEngagementRequest(params) err := req.Send() if err == nil { // resp is now filled fmt.Println(resp) } See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-contacts-2021-05-03/StartEngagement
func (c *SSMContacts) StartEngagementRequest(input *StartEngagementInput) (req *request.Request, output *StartEngagementOutput) { op := &request.Operation{ Name: opStartEngagement, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { input = &StartEngagementInput{} } output = &StartEngagementOutput{} req = c.newRequest(op, input, output) return }
func (c *SSMContacts) ListEngagementsRequest(input *ListEngagementsInput) (req *request.Request, output *ListEngagementsOutput) { op := &request.Operation{ Name: opListEngagements, HTTPMethod: "POST", HTTPPath: "/", Paginator: &request.Paginator{ InputTokens: []string{"NextToken"}, OutputTokens: []string{"NextToken"}, LimitToken: "MaxResults", TruncationToken: "", }, } if input == nil { input = &ListEngagementsInput{} } output = &ListEngagementsOutput{} req = c.newRequest(op, input, output) return }
0.817393
aws/aws-sdk-go
service/ssmcontacts/api.go
aws/aws-sdk-go
service/ssmcontacts/api.go
Apache-2.0
go
StartEngagementRequest generates a "aws/request.Request" representing the client's request for the StartEngagement operation. The "output" return value will be populated with the request's response once the request completes successfully. Use "Send" method on the returned Request to send the API call to the service. the "output" return value is not valid until after Send returns without error. See StartEngagement for more information on using the StartEngagement API call, and error handling. This method is useful when you want to inject custom logic or configuration into the SDK's request lifecycle. Such as custom headers, or retry logic. // Example sending a request using the StartEngagementRequest method. req, resp := client.StartEngagementRequest(params) err := req.Send() if err == nil { // resp is now filled fmt.Println(resp) } See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-contacts-2021-05-03/StartEngagement
func (c *SSMContacts) StartEngagementRequest(input *StartEngagementInput) (req *request.Request, output *StartEngagementOutput) { op := &request.Operation{ Name: opStartEngagement, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { input = &StartEngagementInput{} } output = &StartEngagementOutput{} req = c.newRequest(op, input, output) return }
func (c *SSMContacts) ListPagesByEngagementRequest(input *ListPagesByEngagementInput) (req *request.Request, output *ListPagesByEngagementOutput) { op := &request.Operation{ Name: opListPagesByEngagement, HTTPMethod: "POST", HTTPPath: "/", Paginator: &request.Paginator{ InputTokens: []string{"NextToken"}, OutputTokens: []string{"NextToken"}, LimitToken: "MaxResults", TruncationToken: "", }, } if input == nil { input = &ListPagesByEngagementInput{} } output = &ListPagesByEngagementOutput{} req = c.newRequest(op, input, output) return }
0.757901
aws/aws-sdk-go
service/ssmcontacts/api.go
aws/aws-sdk-go
service/ssmcontacts/api.go
Apache-2.0
go
StartEngagementRequest generates a "aws/request.Request" representing the client's request for the StartEngagement operation. The "output" return value will be populated with the request's response once the request completes successfully. Use "Send" method on the returned Request to send the API call to the service. the "output" return value is not valid until after Send returns without error. See StartEngagement for more information on using the StartEngagement API call, and error handling. This method is useful when you want to inject custom logic or configuration into the SDK's request lifecycle. Such as custom headers, or retry logic. // Example sending a request using the StartEngagementRequest method. req, resp := client.StartEngagementRequest(params) err := req.Send() if err == nil { // resp is now filled fmt.Println(resp) } See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-contacts-2021-05-03/StartEngagement
func (c *SSMContacts) StartEngagementRequest(input *StartEngagementInput) (req *request.Request, output *StartEngagementOutput) { op := &request.Operation{ Name: opStartEngagement, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { input = &StartEngagementInput{} } output = &StartEngagementOutput{} req = c.newRequest(op, input, output) return }
func (c *SSMContacts) StartEngagementWithContext(ctx aws.Context, input *StartEngagementInput, opts ...request.Option) (*StartEngagementOutput, error) { req, out := c.StartEngagementRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() }
0.754788
aws/aws-sdk-go
service/ssmcontacts/api.go
aws/aws-sdk-go
service/ssmcontacts/api.go
Apache-2.0
go
SecureJSON serializes the given struct as Secure JSON into the response body. Default prepends "while(1)," to response body if the given struct is array values. It also sets the Content-Type as "application/json".
func (c *Context) SecureJSON(code int, obj interface{}) { c.Render(code, render.SecureJSON{Prefix: c.engine.secureJSONPrefix, Data: obj}) }
func (c *Context) IndentedJSON(code int, obj interface{}) { c.Render(code, render.IndentedJSON{Data: obj}) }
0.602362
LockGit/gochat
vendor/github.com/gin-gonic/gin/context.go
LockGit/gochat
vendor/github.com/gin-gonic/gin/context.go
MIT
go
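For this row's query, a short Gin handler shows where SecureJSON fits in practice; the route, prefix, and payload are illustrative values, not taken from the dataset.

```go
package main

import (
	"net/http"

	"github.com/gin-gonic/gin"
)

func main() {
	r := gin.Default()
	// Override the engine-level prefix if desired; it is only prepended
	// when the payload is an array value.
	r.SecureJsonPrefix(")]}',\n")

	r.GET("/names", func(c *gin.Context) {
		c.SecureJSON(http.StatusOK, []string{"lena", "austin", "foo"})
	})

	r.Run(":8080")
}
```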
SecureJSON serializes the given struct as Secure JSON into the response body. Default prepends "while(1)," to response body if the given struct is array values. It also sets the Content-Type as "application/json".
func (c *Context) SecureJSON(code int, obj interface{}) { c.Render(code, render.SecureJSON{Prefix: c.engine.secureJSONPrefix, Data: obj}) }
func (s *UpdateByQueryService) BodyJson(body interface{}) *UpdateByQueryService { s.bodyJson = body return s }
0.558784
LockGit/gochat
vendor/github.com/gin-gonic/gin/context.go
qiniu/logkit
vendor/gopkg.in/olivere/elastic.v3/update_by_query.go
Apache-2.0
go
SecureJSON serializes the given struct as Secure JSON into the response body. Default prepends "while(1)," to response body if the given struct is array values. It also sets the Content-Type as "application/json".
func (c *Context) SecureJSON(code int, obj interface{}) { c.Render(code, render.SecureJSON{Prefix: c.engine.secureJSONPrefix, Data: obj}) }
func (c *Client) JSONCall(ctx context.Context, endpoint string, headers http.Header, qv url.Values, body, resp interface{}) error { if qv == nil { qv = url.Values{} } v := reflect.ValueOf(resp) if err := c.checkResp(v); err != nil { return err } // Choose a JSON marshal/unmarshal depending on if we have AdditionalFields attribute. var marshal = json.Marshal var unmarshal = json.Unmarshal if _, ok := v.Elem().Type().FieldByName("AdditionalFields"); ok { marshal = customJSON.Marshal unmarshal = customJSON.Unmarshal } req, err := http.NewRequestWithContext(ctx, http.MethodGet, fmt.Sprintf("%s?%s", endpoint, qv.Encode()), nil) if err != nil { return fmt.Errorf("could not create request: %w", err) } addStdHeaders(headers) req.Header = headers if body != nil { // Note: In case your wondering why we are not gzip encoding.... // I'm not sure if these various services support gzip on send. headers.Add("Content-Type", "application/json; charset=utf-8") data, err := marshal(body) if err != nil { return fmt.Errorf("bug: conn.Call(): could not marshal the body object: %w", err) } req.Body = io.NopCloser(bytes.NewBuffer(data)) req.Method = http.MethodPost } data, err := c.do(ctx, req) if err != nil { return err } if resp != nil { if err := unmarshal(data, resp); err != nil { return fmt.Errorf("json decode error: %w\njson message bytes were: %s", err, string(data)) } } return nil }
0.537669
LockGit/gochat
vendor/github.com/gin-gonic/gin/context.go
tektoncd/cli
vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/internal/comm/comm.go
Apache-2.0
go
SecureJSON serializes the given struct as Secure JSON into the response body. Default prepends "while(1)," to response body if the given struct is array values. It also sets the Content-Type as "application/json".
func (c *Context) SecureJSON(code int, obj interface{}) { c.Render(code, render.SecureJSON{Prefix: c.engine.secureJSONPrefix, Data: obj}) }
func (s *CountService) BodyJson(body interface{}) *CountService { s.bodyJson = body return s }
0.53759
LockGit/gochat
vendor/github.com/gin-gonic/gin/context.go
qiniu/logkit
vendor/gopkg.in/olivere/elastic.v3/count.go
Apache-2.0
go
SecureJSON serializes the given struct as Secure JSON into the response body. Default prepends "while(1)," to response body if the given struct is array values. It also sets the Content-Type as "application/json".
func (c *Context) SecureJSON(code int, obj interface{}) { c.Render(code, render.SecureJSON{Prefix: c.engine.secureJSONPrefix, Data: obj}) }
func (p Parsed) MarshalJSON() ([]byte, error) { return []byte(`"` + p.String() + `"`), nil }
0.532955
LockGit/gochat
vendor/github.com/gin-gonic/gin/context.go
google/osv-scanner
internal/imodels/ecosystem/ecosystem.go
Apache-2.0
go
StartExportTaskRequest generates a "aws/request.Request" representing the client's request for the StartExportTask operation. The "output" return value will be populated with the request's response once the request completes successfully. Use "Send" method on the returned Request to send the API call to the service. the "output" return value is not valid until after Send returns without error. See StartExportTask for more information on using the StartExportTask API call, and error handling. This method is useful when you want to inject custom logic or configuration into the SDK's request lifecycle. Such as custom headers, or retry logic. // Example sending a request using the StartExportTaskRequest method. req, resp := client.StartExportTaskRequest(params) err := req.Send() if err == nil { // resp is now filled fmt.Println(resp) } See also, https://docs.aws.amazon.com/goto/WebAPI/rds-2014-10-31/StartExportTask
func (c *RDS) StartExportTaskRequest(input *StartExportTaskInput) (req *request.Request, output *StartExportTaskOutput) { op := &request.Operation{ Name: opStartExportTask, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { input = &StartExportTaskInput{} } output = &StartExportTaskOutput{} req = c.newRequest(op, input, output) return }
func (c *ApplicationDiscoveryService) StartExportTaskRequest(input *StartExportTaskInput) (req *request.Request, output *StartExportTaskOutput) { op := &request.Operation{ Name: opStartExportTask, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { input = &StartExportTaskInput{} } output = &StartExportTaskOutput{} req = c.newRequest(op, input, output) return }
0.985419
aws/aws-sdk-go
service/rds/api.go
aws/aws-sdk-go
service/applicationdiscoveryservice/api.go
Apache-2.0
go
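This group documents the same request-builder pattern for RDS. The sketch below focuses on the part the docstring highlights, customizing the unsent request before Send(); the identifiers and ARNs are placeholders, and the StartExportTaskInput field names are assumed from the RDS API as I recall it.

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/rds"
)

func main() {
	sess := session.Must(session.NewSession(aws.NewConfig().WithRegion("us-east-1")))
	client := rds.New(sess)

	req, out := client.StartExportTaskRequest(&rds.StartExportTaskInput{
		ExportTaskIdentifier: aws.String("example-export"),                                        // placeholder
		SourceArn:            aws.String("arn:aws:rds:us-east-1:111122223333:snapshot:snap-name"), // placeholder
		S3BucketName:         aws.String("example-export-bucket"),
		IamRoleArn:           aws.String("arn:aws:iam::111122223333:role/example-export-role"),
		KmsKeyId:             aws.String("alias/example-key"),
	})

	// The request has not been sent yet, so it can still be customized,
	// for example with an extra HTTP header.
	req.HTTPRequest.Header.Set("X-Debug-Run", "true")

	if err := req.Send(); err != nil {
		log.Fatal(err)
	}
	fmt.Println(out)
}
```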
StartExportTaskRequest generates a "aws/request.Request" representing the client's request for the StartExportTask operation. The "output" return value will be populated with the request's response once the request completes successfully. Use "Send" method on the returned Request to send the API call to the service. the "output" return value is not valid until after Send returns without error. See StartExportTask for more information on using the StartExportTask API call, and error handling. This method is useful when you want to inject custom logic or configuration into the SDK's request lifecycle. Such as custom headers, or retry logic. // Example sending a request using the StartExportTaskRequest method. req, resp := client.StartExportTaskRequest(params) err := req.Send() if err == nil { // resp is now filled fmt.Println(resp) } See also, https://docs.aws.amazon.com/goto/WebAPI/rds-2014-10-31/StartExportTask
func (c *RDS) StartExportTaskRequest(input *StartExportTaskInput) (req *request.Request, output *StartExportTaskOutput) { op := &request.Operation{ Name: opStartExportTask, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { input = &StartExportTaskInput{} } output = &StartExportTaskOutput{} req = c.newRequest(op, input, output) return }
func (c *CloudWatchLogs) CreateExportTaskRequest(input *CreateExportTaskInput) (req *request.Request, output *CreateExportTaskOutput) { op := &request.Operation{ Name: opCreateExportTask, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { input = &CreateExportTaskInput{} } output = &CreateExportTaskOutput{} req = c.newRequest(op, input, output) return }
0.903833
aws/aws-sdk-go
service/rds/api.go
aws/aws-sdk-go
service/cloudwatchlogs/api.go
Apache-2.0
go
StartExportTaskRequest generates a "aws/request.Request" representing the client's request for the StartExportTask operation. The "output" return value will be populated with the request's response once the request completes successfully. Use "Send" method on the returned Request to send the API call to the service. the "output" return value is not valid until after Send returns without error. See StartExportTask for more information on using the StartExportTask API call, and error handling. This method is useful when you want to inject custom logic or configuration into the SDK's request lifecycle. Such as custom headers, or retry logic. // Example sending a request using the StartExportTaskRequest method. req, resp := client.StartExportTaskRequest(params) err := req.Send() if err == nil { // resp is now filled fmt.Println(resp) } See also, https://docs.aws.amazon.com/goto/WebAPI/rds-2014-10-31/StartExportTask
func (c *RDS) StartExportTaskRequest(input *StartExportTaskInput) (req *request.Request, output *StartExportTaskOutput) { op := &request.Operation{ Name: opStartExportTask, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { input = &StartExportTaskInput{} } output = &StartExportTaskOutput{} req = c.newRequest(op, input, output) return }
func (c *Mgn) StartExportRequest(input *StartExportInput) (req *request.Request, output *StartExportOutput) { op := &request.Operation{ Name: opStartExport, HTTPMethod: "POST", HTTPPath: "/StartExport", } if input == nil { input = &StartExportInput{} } output = &StartExportOutput{} req = c.newRequest(op, input, output) return }
0.874884
aws/aws-sdk-go
service/rds/api.go
aws/aws-sdk-go
service/mgn/api.go
Apache-2.0
go
StartExportTaskRequest generates a "aws/request.Request" representing the client's request for the StartExportTask operation. The "output" return value will be populated with the request's response once the request completes successfully. Use "Send" method on the returned Request to send the API call to the service. the "output" return value is not valid until after Send returns without error. See StartExportTask for more information on using the StartExportTask API call, and error handling. This method is useful when you want to inject custom logic or configuration into the SDK's request lifecycle. Such as custom headers, or retry logic. // Example sending a request using the StartExportTaskRequest method. req, resp := client.StartExportTaskRequest(params) err := req.Send() if err == nil { // resp is now filled fmt.Println(resp) } See also, https://docs.aws.amazon.com/goto/WebAPI/rds-2014-10-31/StartExportTask
func (c *RDS) StartExportTaskRequest(input *StartExportTaskInput) (req *request.Request, output *StartExportTaskOutput) { op := &request.Operation{ Name: opStartExportTask, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { input = &StartExportTaskInput{} } output = &StartExportTaskOutput{} req = c.newRequest(op, input, output) return }
func (c *ApplicationDiscoveryService) StartImportTaskRequest(input *StartImportTaskInput) (req *request.Request, output *StartImportTaskOutput) { op := &request.Operation{ Name: opStartImportTask, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { input = &StartImportTaskInput{} } output = &StartImportTaskOutput{} req = c.newRequest(op, input, output) return }
0.817418
aws/aws-sdk-go
service/rds/api.go
aws/aws-sdk-go
service/applicationdiscoveryservice/api.go
Apache-2.0
go
StartExportTaskRequest generates a "aws/request.Request" representing the client's request for the StartExportTask operation. The "output" return value will be populated with the request's response once the request completes successfully. Use "Send" method on the returned Request to send the API call to the service. the "output" return value is not valid until after Send returns without error. See StartExportTask for more information on using the StartExportTask API call, and error handling. This method is useful when you want to inject custom logic or configuration into the SDK's request lifecycle. Such as custom headers, or retry logic. // Example sending a request using the StartExportTaskRequest method. req, resp := client.StartExportTaskRequest(params) err := req.Send() if err == nil { // resp is now filled fmt.Println(resp) } See also, https://docs.aws.amazon.com/goto/WebAPI/rds-2014-10-31/StartExportTask
func (c *RDS) StartExportTaskRequest(input *StartExportTaskInput) (req *request.Request, output *StartExportTaskOutput) { op := &request.Operation{ Name: opStartExportTask, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { input = &StartExportTaskInput{} } output = &StartExportTaskOutput{} req = c.newRequest(op, input, output) return }
func (c *ECS) StartTaskRequest(input *StartTaskInput) (req *request.Request, output *StartTaskOutput) { op := &request.Operation{ Name: opStartTask, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { input = &StartTaskInput{} } output = &StartTaskOutput{} req = c.newRequest(op, input, output) return }
0.814148
aws/aws-sdk-go
service/rds/api.go
aws/aws-sdk-go
service/ecs/api.go
Apache-2.0
go
UpdateSMBLocalGroupsWithContext is the same as UpdateSMBLocalGroups with the addition of the ability to pass a context and additional request options. See UpdateSMBLocalGroups for details on how to use this API operation. The context must be non-nil and will be used for request cancellation. If the context is nil a panic will occur. In the future the SDK may create sub-contexts for http.Requests. See https://golang.org/pkg/context/ for more information on using Contexts.
func (c *StorageGateway) UpdateSMBLocalGroupsWithContext(ctx aws.Context, input *UpdateSMBLocalGroupsInput, opts ...request.Option) (*UpdateSMBLocalGroupsOutput, error) { req, out := c.UpdateSMBLocalGroupsRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() }
func (c *StorageGateway) UpdateSMBLocalGroupsRequest(input *UpdateSMBLocalGroupsInput) (req *request.Request, output *UpdateSMBLocalGroupsOutput) { op := &request.Operation{ Name: opUpdateSMBLocalGroups, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { input = &UpdateSMBLocalGroupsInput{} } output = &UpdateSMBLocalGroupsOutput{} req = c.newRequest(op, input, output) return }
0.842807
aws/aws-sdk-go
service/storagegateway/api.go
aws/aws-sdk-go
service/storagegateway/api.go
Apache-2.0
go
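The WithContext variant documented here accepts any non-nil context.Context (aws.Context is an alias for it in aws-sdk-go v1), so a timeout context cancels the underlying HTTP request. A sketch with placeholder input values follows; the UpdateSMBLocalGroupsInput and SMBLocalGroups field names are assumptions based on my recollection of the Storage Gateway API.

```go
package main

import (
	"context"
	"log"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/storagegateway"
)

func main() {
	sess := session.Must(session.NewSession(aws.NewConfig().WithRegion("us-west-2")))
	svc := storagegateway.New(sess)

	// Cancel the call if it takes longer than 30 seconds.
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	input := &storagegateway.UpdateSMBLocalGroupsInput{
		GatewayARN: aws.String("arn:aws:storagegateway:us-west-2:111122223333:gateway/sgw-example"), // placeholder
		SMBLocalGroups: &storagegateway.SMBLocalGroups{
			GatewayAdmins: []*string{aws.String("DOMAIN\\ops-admins")},
		},
	}
	if _, err := svc.UpdateSMBLocalGroupsWithContext(ctx, input); err != nil {
		log.Fatal(err)
	}
}
```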
UpdateSMBLocalGroupsWithContext is the same as UpdateSMBLocalGroups with the addition of the ability to pass a context and additional request options. See UpdateSMBLocalGroups for details on how to use this API operation. The context must be non-nil and will be used for request cancellation. If the context is nil a panic will occur. In the future the SDK may create sub-contexts for http.Requests. See https://golang.org/pkg/context/ for more information on using Contexts.
func (c *StorageGateway) UpdateSMBLocalGroupsWithContext(ctx aws.Context, input *UpdateSMBLocalGroupsInput, opts ...request.Option) (*UpdateSMBLocalGroupsOutput, error) { req, out := c.UpdateSMBLocalGroupsRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() }
func (c *StorageGateway) UpdateSMBFileShareWithContext(ctx aws.Context, input *UpdateSMBFileShareInput, opts ...request.Option) (*UpdateSMBFileShareOutput, error) { req, out := c.UpdateSMBFileShareRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() }
0.715591
aws/aws-sdk-go
service/storagegateway/api.go
aws/aws-sdk-go
service/storagegateway/api.go
Apache-2.0
go
UpdateSMBLocalGroupsWithContext is the same as UpdateSMBLocalGroups with the addition of the ability to pass a context and additional request options. See UpdateSMBLocalGroups for details on how to use this API operation. The context must be non-nil and will be used for request cancellation. If the context is nil a panic will occur. In the future the SDK may create sub-contexts for http.Requests. See https://golang.org/pkg/context/ for more information on using Contexts.
func (c *StorageGateway) UpdateSMBLocalGroupsWithContext(ctx aws.Context, input *UpdateSMBLocalGroupsInput, opts ...request.Option) (*UpdateSMBLocalGroupsOutput, error) { req, out := c.UpdateSMBLocalGroupsRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() }
func (c *IoTWireless) UpdateMulticastGroupWithContext(ctx aws.Context, input *UpdateMulticastGroupInput, opts ...request.Option) (*UpdateMulticastGroupOutput, error) { req, out := c.UpdateMulticastGroupRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() }
0.685167
aws/aws-sdk-go
service/storagegateway/api.go
aws/aws-sdk-go
service/iotwireless/api.go
Apache-2.0
go
UpdateSMBLocalGroupsWithContext is the same as UpdateSMBLocalGroups with the addition of the ability to pass a context and additional request options. See UpdateSMBLocalGroups for details on how to use this API operation. The context must be non-nil and will be used for request cancellation. If the context is nil a panic will occur. In the future the SDK may create sub-contexts for http.Requests. See https://golang.org/pkg/context/ for more information on using Contexts.
func (c *StorageGateway) UpdateSMBLocalGroupsWithContext(ctx aws.Context, input *UpdateSMBLocalGroupsInput, opts ...request.Option) (*UpdateSMBLocalGroupsOutput, error) { req, out := c.UpdateSMBLocalGroupsRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() }
func (c *StorageGateway) UpdateSMBSecurityStrategyWithContext(ctx aws.Context, input *UpdateSMBSecurityStrategyInput, opts ...request.Option) (*UpdateSMBSecurityStrategyOutput, error) { req, out := c.UpdateSMBSecurityStrategyRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() }
0.668366
aws/aws-sdk-go
service/storagegateway/api.go
aws/aws-sdk-go
service/storagegateway/api.go
Apache-2.0
go
UpdateSMBLocalGroupsWithContext is the same as UpdateSMBLocalGroups with the addition of the ability to pass a context and additional request options. See UpdateSMBLocalGroups for details on how to use this API operation. The context must be non-nil and will be used for request cancellation. If the context is nil a panic will occur. In the future the SDK may create sub-contexts for http.Requests. See https://golang.org/pkg/context/ for more information on using Contexts.
func (c *StorageGateway) UpdateSMBLocalGroupsWithContext(ctx aws.Context, input *UpdateSMBLocalGroupsInput, opts ...request.Option) (*UpdateSMBLocalGroupsOutput, error) { req, out := c.UpdateSMBLocalGroupsRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() }
func (c *StorageGateway) UpdateSMBLocalGroups(input *UpdateSMBLocalGroupsInput) (*UpdateSMBLocalGroupsOutput, error) { req, out := c.UpdateSMBLocalGroupsRequest(input) return out, req.Send() }
0.662501
aws/aws-sdk-go
service/storagegateway/api.go
aws/aws-sdk-go
service/storagegateway/api.go
Apache-2.0
go
ListPackageVersions API operation for AWS IoT. Lists the software package versions associated to the account. Requires permission to access the ListPackageVersions (https://docs.aws.amazon.com/service-authorization/latest/reference/list_awsiot.html#awsiot-actions-as-permissions) action. Returns awserr.Error for service API and SDK errors. Use runtime type assertions with awserr.Error's Code and Message methods to get detailed information about the error. See the AWS API reference guide for AWS IoT's API operation ListPackageVersions for usage and error information. Returned Error Types: - ThrottlingException The rate exceeds the limit. - InternalServerException Internal error from the service that indicates an unexpected error or that the service is unavailable. - ValidationException The request is not valid.
func (c *IoT) ListPackageVersions(input *ListPackageVersionsInput) (*ListPackageVersionsOutput, error) { req, out := c.ListPackageVersionsRequest(input) return out, req.Send() }
func (c *CodeArtifact) ListPackageVersions(input *ListPackageVersionsInput) (*ListPackageVersionsOutput, error) { req, out := c.ListPackageVersionsRequest(input) return out, req.Send() }
0.859447
aws/aws-sdk-go
service/iot/api.go
aws/aws-sdk-go
service/codeartifact/api.go
Apache-2.0
go
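The docstring recommends runtime type assertions on awserr.Error to branch on the listed error types. A hedged sketch of that handling is below; the package name field and the ErrCode constants follow the usual aws-sdk-go code-generation convention and are assumptions, as is the placeholder package name.

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/iot"
)

func main() {
	sess := session.Must(session.NewSession(aws.NewConfig().WithRegion("us-east-1")))
	svc := iot.New(sess)

	out, err := svc.ListPackageVersions(&iot.ListPackageVersionsInput{
		PackageName: aws.String("example-package"), // placeholder
	})
	if err != nil {
		// Runtime type assertion on awserr.Error, as the docstring suggests.
		if aerr, ok := err.(awserr.Error); ok {
			switch aerr.Code() {
			case iot.ErrCodeThrottlingException:
				log.Fatalf("throttled: %s", aerr.Message())
			case iot.ErrCodeValidationException:
				log.Fatalf("invalid request: %s", aerr.Message())
			default:
				log.Fatal(aerr)
			}
		}
		log.Fatal(err)
	}
	fmt.Println(out)
}
```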
ListPackageVersions API operation for AWS IoT. Lists the software package versions associated to the account. Requires permission to access the ListPackageVersions (https://docs.aws.amazon.com/service-authorization/latest/reference/list_awsiot.html#awsiot-actions-as-permissions) action. Returns awserr.Error for service API and SDK errors. Use runtime type assertions with awserr.Error's Code and Message methods to get detailed information about the error. See the AWS API reference guide for AWS IoT's API operation ListPackageVersions for usage and error information. Returned Error Types: - ThrottlingException The rate exceeds the limit. - InternalServerException Internal error from the service that indicates an unexpected error or that the service is unavailable. - ValidationException The request is not valid.
func (c *IoT) ListPackageVersions(input *ListPackageVersionsInput) (*ListPackageVersionsOutput, error) { req, out := c.ListPackageVersionsRequest(input) return out, req.Send() }
func (c *IoT) GetPackageVersion(input *GetPackageVersionInput) (*GetPackageVersionOutput, error) { req, out := c.GetPackageVersionRequest(input) return out, req.Send() }
0.809434
aws/aws-sdk-go
service/iot/api.go
aws/aws-sdk-go
service/iot/api.go
Apache-2.0
go
ListPackageVersions API operation for AWS IoT. Lists the software package versions associated to the account. Requires permission to access the ListPackageVersions (https://docs.aws.amazon.com/service-authorization/latest/reference/list_awsiot.html#awsiot-actions-as-permissions) action. Returns awserr.Error for service API and SDK errors. Use runtime type assertions with awserr.Error's Code and Message methods to get detailed information about the error. See the AWS API reference guide for AWS IoT's API operation ListPackageVersions for usage and error information. Returned Error Types: - ThrottlingException The rate exceeds the limit. - InternalServerException Internal error from the service that indicates an unexpected error or that the service is unavailable. - ValidationException The request is not valid.
func (c *IoT) ListPackageVersions(input *ListPackageVersionsInput) (*ListPackageVersionsOutput, error) { req, out := c.ListPackageVersionsRequest(input) return out, req.Send() }
func (c *IoT) ListPackages(input *ListPackagesInput) (*ListPackagesOutput, error) { req, out := c.ListPackagesRequest(input) return out, req.Send() }
0.804295
aws/aws-sdk-go
service/iot/api.go
aws/aws-sdk-go
service/iot/api.go
Apache-2.0
go
ListPackageVersions API operation for AWS IoT. Lists the software package versions associated to the account. Requires permission to access the ListPackageVersions (https://docs.aws.amazon.com/service-authorization/latest/reference/list_awsiot.html#awsiot-actions-as-permissions) action. Returns awserr.Error for service API and SDK errors. Use runtime type assertions with awserr.Error's Code and Message methods to get detailed information about the error. See the AWS API reference guide for AWS IoT's API operation ListPackageVersions for usage and error information. Returned Error Types: - ThrottlingException The rate exceeds the limit. - InternalServerException Internal error from the service that indicates an unexpected error or that the service is unavailable. - ValidationException The request is not valid.
func (c *IoT) ListPackageVersions(input *ListPackageVersionsInput) (*ListPackageVersionsOutput, error) { req, out := c.ListPackageVersionsRequest(input) return out, req.Send() }
func (c *IoT) GetPackage(input *GetPackageInput) (*GetPackageOutput, error) { req, out := c.GetPackageRequest(input) return out, req.Send() }
0.7575
aws/aws-sdk-go
service/iot/api.go
aws/aws-sdk-go
service/iot/api.go
Apache-2.0
go
ListPackageVersions API operation for AWS IoT. Lists the software package versions associated to the account. Requires permission to access the ListPackageVersions (https://docs.aws.amazon.com/service-authorization/latest/reference/list_awsiot.html#awsiot-actions-as-permissions) action. Returns awserr.Error for service API and SDK errors. Use runtime type assertions with awserr.Error's Code and Message methods to get detailed information about the error. See the AWS API reference guide for AWS IoT's API operation ListPackageVersions for usage and error information. Returned Error Types: - ThrottlingException The rate exceeds the limit. - InternalServerException Internal error from the service that indicates an unexpected error or that the service is unavailable. - ValidationException The request is not valid.
func (c *IoT) ListPackageVersions(input *ListPackageVersionsInput) (*ListPackageVersionsOutput, error) { req, out := c.ListPackageVersionsRequest(input) return out, req.Send() }
func (c *ElasticsearchService) GetPackageVersionHistory(input *GetPackageVersionHistoryInput) (*GetPackageVersionHistoryOutput, error) { req, out := c.GetPackageVersionHistoryRequest(input) return out, req.Send() }
0.75291
aws/aws-sdk-go
service/iot/api.go
aws/aws-sdk-go
service/elasticsearchservice/api.go
Apache-2.0
go
GetRuleIndexMap Get a map from rule names to rule indexes. Used for XPath and tree pattern compilation. TODO: JI This is not yet implemented in the Go runtime. Maybe not needed.
func (b *BaseRecognizer) GetRuleIndexMap() map[string]int { panic("Method not defined!") // var ruleNames = b.GetRuleNames() // if (ruleNames==nil) { // panic("The current recognizer does not provide a list of rule names.") // } // // var result = ruleIndexMapCache[ruleNames] // if(result==nil) { // result = ruleNames.reduce(function(o, k, i) { o[k] = i }) // ruleIndexMapCache[ruleNames] = result // } // return result }
func New(tp elastictransport.Interface) *GetRule { r := &GetRule{ transport: tp, values: make(url.Values), headers: make(http.Header), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { if instrument := instrumented.InstrumentationEnabled(); instrument != nil { r.instrument = instrument } } return r }
0.563045
google/cel-go
vendor/github.com/antlr4-go/antlr/v4/recognizer.go
elastic/go-elasticsearch
typedapi/queryrules/getrule/get_rule.go
Apache-2.0
go
GetRuleIndexMap Get a map from rule names to rule indexes. Used for XPath and tree pattern compilation. TODO: JI This is not yet implemented in the Go runtime. Maybe not needed.
func (b *BaseRecognizer) GetRuleIndexMap() map[string]int { panic("Method not defined!") // var ruleNames = b.GetRuleNames() // if (ruleNames==nil) { // panic("The current recognizer does not provide a list of rule names.") // } // // var result = ruleIndexMapCache[ruleNames] // if(result==nil) { // result = ruleNames.reduce(function(o, k, i) { o[k] = i }) // ruleIndexMapCache[ruleNames] = result // } // return result }
func (p *BaseParser) GetRuleInvocationStack(c ParserRuleContext) []string { if c == nil { c = p.ctx } stack := make([]string, 0) for c != nil { // compute what follows who invoked us ruleIndex := c.GetRuleIndex() if ruleIndex < 0 { stack = append(stack, "n/a") } else { stack = append(stack, p.GetRuleNames()[ruleIndex]) } vp := c.GetParent() if vp == nil { break } c = vp.(ParserRuleContext) } return stack }
0.517423
google/cel-go
vendor/github.com/antlr4-go/antlr/v4/recognizer.go
google/cel-go
vendor/github.com/antlr4-go/antlr/v4/parser.go
Apache-2.0
go
GetRuleIndexMap Get a map from rule names to rule indexes. Used for XPath and tree pattern compilation. TODO: JI This is not yet implemented in the Go runtime. Maybe not needed.
func (b *BaseRecognizer) GetRuleIndexMap() map[string]int { panic("Method not defined!") // var ruleNames = b.GetRuleNames() // if (ruleNames==nil) { // panic("The current recognizer does not provide a list of rule names.") // } // // var result = ruleIndexMapCache[ruleNames] // if(result==nil) { // result = ruleNames.reduce(function(o, k, i) { o[k] = i }) // ruleIndexMapCache[ruleNames] = result // } // return result }
func (m *NotificationEventMutation) RulesIDs() (ids []string) { if id := m.rules; id != nil { ids = append(ids, *id) } return }
0.506687
google/cel-go
vendor/github.com/antlr4-go/antlr/v4/recognizer.go
openmeterio/openmeter
openmeter/ent/db/mutation.go
Apache-2.0
go
GetRuleIndexMap Get a map from rule names to rule indexes. Used for XPath and tree pattern compilation. TODO: JI This is not yet implemented in the Go runtime. Maybe not needed.
func (b *BaseRecognizer) GetRuleIndexMap() map[string]int { panic("Method not defined!") // var ruleNames = b.GetRuleNames() // if (ruleNames==nil) { // panic("The current recognizer does not provide a list of rule names.") // } // // var result = ruleIndexMapCache[ruleNames] // if(result==nil) { // result = ruleNames.reduce(function(o, k, i) { o[k] = i }) // ruleIndexMapCache[ruleNames] = result // } // return result }
func (h *Handle) RuleListFiltered(family int, filter *Rule, filterMask uint64) ([]Rule, error) { req := h.newNetlinkRequest(unix.RTM_GETRULE, unix.NLM_F_DUMP|unix.NLM_F_REQUEST) msg := nl.NewIfInfomsg(family) req.AddData(msg) msgs, executeErr := req.Execute(unix.NETLINK_ROUTE, unix.RTM_NEWRULE) if executeErr != nil && !errors.Is(executeErr, ErrDumpInterrupted) { return nil, executeErr } var res = make([]Rule, 0) for i := range msgs { msg := nl.DeserializeRtMsg(msgs[i]) attrs, err := nl.ParseRouteAttr(msgs[i][msg.Len():]) if err != nil { return nil, err } rule := NewRule() rule.Priority = 0 // The default priority from kernel rule.Invert = msg.Flags&FibRuleInvert > 0 rule.Family = int(msg.Family) rule.Tos = uint(msg.Tos) for j := range attrs { switch attrs[j].Attr.Type { case unix.RTA_TABLE: rule.Table = int(native.Uint32(attrs[j].Value[0:4])) case nl.FRA_SRC: rule.Src = &net.IPNet{ IP: attrs[j].Value, Mask: net.CIDRMask(int(msg.Src_len), 8*len(attrs[j].Value)), } case nl.FRA_DST: rule.Dst = &net.IPNet{ IP: attrs[j].Value, Mask: net.CIDRMask(int(msg.Dst_len), 8*len(attrs[j].Value)), } case nl.FRA_FWMARK: rule.Mark = native.Uint32(attrs[j].Value[0:4]) case nl.FRA_FWMASK: mask := native.Uint32(attrs[j].Value[0:4]) rule.Mask = &mask case nl.FRA_TUN_ID: rule.TunID = uint(native.Uint64(attrs[j].Value[0:8])) case nl.FRA_IIFNAME: rule.IifName = string(attrs[j].Value[:len(attrs[j].Value)-1]) case nl.FRA_OIFNAME: rule.OifName = string(attrs[j].Value[:len(attrs[j].Value)-1]) case nl.FRA_SUPPRESS_PREFIXLEN: i := native.Uint32(attrs[j].Value[0:4]) if i != 0xffffffff { rule.SuppressPrefixlen = int(i) } case nl.FRA_SUPPRESS_IFGROUP: i := native.Uint32(attrs[j].Value[0:4]) if i != 0xffffffff { rule.SuppressIfgroup = int(i) } case nl.FRA_FLOW: rule.Flow = int(native.Uint32(attrs[j].Value[0:4])) case nl.FRA_GOTO: rule.Goto = int(native.Uint32(attrs[j].Value[0:4])) case nl.FRA_PRIORITY: rule.Priority = int(native.Uint32(attrs[j].Value[0:4])) case nl.FRA_IP_PROTO: rule.IPProto = int(native.Uint32(attrs[j].Value[0:4])) case nl.FRA_DPORT_RANGE: rule.Dport = NewRulePortRange(native.Uint16(attrs[j].Value[0:2]), native.Uint16(attrs[j].Value[2:4])) case nl.FRA_SPORT_RANGE: rule.Sport = NewRulePortRange(native.Uint16(attrs[j].Value[0:2]), native.Uint16(attrs[j].Value[2:4])) case nl.FRA_UID_RANGE: rule.UIDRange = NewRuleUIDRange(native.Uint32(attrs[j].Value[0:4]), native.Uint32(attrs[j].Value[4:8])) case nl.FRA_PROTOCOL: rule.Protocol = uint8(attrs[j].Value[0]) } } if filter != nil { switch { case filterMask&RT_FILTER_SRC != 0 && (rule.Src == nil || rule.Src.String() != filter.Src.String()): continue case filterMask&RT_FILTER_DST != 0 && (rule.Dst == nil || rule.Dst.String() != filter.Dst.String()): continue case filterMask&RT_FILTER_TABLE != 0 && filter.Table != unix.RT_TABLE_UNSPEC && rule.Table != filter.Table: continue case filterMask&RT_FILTER_TOS != 0 && rule.Tos != filter.Tos: continue case filterMask&RT_FILTER_PRIORITY != 0 && rule.Priority != filter.Priority: continue case filterMask&RT_FILTER_MARK != 0 && rule.Mark != filter.Mark: continue case filterMask&RT_FILTER_MASK != 0 && !ptrEqual(rule.Mask, filter.Mask): continue } } res = append(res, *rule) } return res, executeErr }
0.503625
google/cel-go
vendor/github.com/antlr4-go/antlr/v4/recognizer.go
moby/buildkit
vendor/github.com/vishvananda/netlink/rule_linux.go
Apache-2.0
go
GetRuleIndexMap Get a map from rule names to rule indexes. Used for XPath and tree pattern compilation. TODO: JI This is not yet implemented in the Go runtime. Maybe not needed.
func (b *BaseRecognizer) GetRuleIndexMap() map[string]int { panic("Method not defined!") // var ruleNames = b.GetRuleNames() // if (ruleNames==nil) { // panic("The current recognizer does not provide a list of rule names.") // } // // var result = ruleIndexMapCache[ruleNames] // if(result==nil) { // result = ruleNames.reduce(function(o, k, i) { o[k] = i }) // ruleIndexMapCache[ruleNames] = result // } // return result }
func (h *Handle) RuleList(family int) ([]Rule, error) { return h.RuleListFiltered(family, nil, 0) }
0.499402
google/cel-go
vendor/github.com/antlr4-go/antlr/v4/recognizer.go
moby/buildkit
vendor/github.com/vishvananda/netlink/rule_linux.go
Apache-2.0
go
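The recognizer method in the records above only panics; its commented-out JavaScript-derived body sketches the intended rule-name-to-index mapping. A small, hypothetical Go helper (not part of the ANTLR runtime) that builds such a map from a slice of rule names could look like this:

package main

import "fmt"

// buildRuleIndexMap is a hypothetical stand-in for the unimplemented
// GetRuleIndexMap: it maps each rule name to its index in the slice.
func buildRuleIndexMap(ruleNames []string) map[string]int {
	m := make(map[string]int, len(ruleNames))
	for i, name := range ruleNames {
		m[name] = i
	}
	return m
}

func main() {
	// Rule names here are placeholders, not taken from a real grammar.
	fmt.Println(buildRuleIndexMap([]string{"prog", "stat", "expr"}))
}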
DescribeOperatingSystemsWithContext is the same as DescribeOperatingSystems with the addition of the ability to pass a context and additional request options. See DescribeOperatingSystems for details on how to use this API operation. The context must be non-nil and will be used for request cancellation. If the context is nil a panic will occur. In the future the SDK may create sub-contexts for http.Requests. See https://golang.org/pkg/context/ for more information on using Contexts.
func (c *OpsWorks) DescribeOperatingSystemsWithContext(ctx aws.Context, input *DescribeOperatingSystemsInput, opts ...request.Option) (*DescribeOperatingSystemsOutput, error) { req, out := c.DescribeOperatingSystemsRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() }
func (c *OpsWorks) DescribeOperatingSystemsRequest(input *DescribeOperatingSystemsInput) (req *request.Request, output *DescribeOperatingSystemsOutput) { op := &request.Operation{ Name: opDescribeOperatingSystems, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { input = &DescribeOperatingSystemsInput{} } output = &DescribeOperatingSystemsOutput{} req = c.newRequest(op, input, output) return }
0.801551
aws/aws-sdk-go
service/opsworks/api.go
aws/aws-sdk-go
service/opsworks/api.go
Apache-2.0
go
DescribeOperatingSystemsWithContext is the same as DescribeOperatingSystems with the addition of the ability to pass a context and additional request options. See DescribeOperatingSystems for details on how to use this API operation. The context must be non-nil and will be used for request cancellation. If the context is nil a panic will occur. In the future the SDK may create sub-contexts for http.Requests. See https://golang.org/pkg/context/ for more information on using Contexts.
func (c *OpsWorks) DescribeOperatingSystemsWithContext(ctx aws.Context, input *DescribeOperatingSystemsInput, opts ...request.Option) (*DescribeOperatingSystemsOutput, error) { req, out := c.DescribeOperatingSystemsRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() }
func (c *OpsWorks) DescribeOperatingSystems(input *DescribeOperatingSystemsInput) (*DescribeOperatingSystemsOutput, error) { req, out := c.DescribeOperatingSystemsRequest(input) return out, req.Send() }
0.789323
aws/aws-sdk-go
service/opsworks/api.go
aws/aws-sdk-go
service/opsworks/api.go
Apache-2.0
go
DescribeOperatingSystemsWithContext is the same as DescribeOperatingSystems with the addition of the ability to pass a context and additional request options. See DescribeOperatingSystems for details on how to use this API operation. The context must be non-nil and will be used for request cancellation. If the context is nil a panic will occur. In the future the SDK may create sub-contexts for http.Requests. See https://golang.org/pkg/context/ for more information on using Contexts.
func (c *OpsWorks) DescribeOperatingSystemsWithContext(ctx aws.Context, input *DescribeOperatingSystemsInput, opts ...request.Option) (*DescribeOperatingSystemsOutput, error) { req, out := c.DescribeOperatingSystemsRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() }
func (c *MediaLive) DescribeOfferingWithContext(ctx aws.Context, input *DescribeOfferingInput, opts ...request.Option) (*DescribeOfferingOutput, error) { req, out := c.DescribeOfferingRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() }
0.616951
aws/aws-sdk-go
service/opsworks/api.go
aws/aws-sdk-go
service/medialive/api.go
Apache-2.0
go
DescribeOperatingSystemsWithContext is the same as DescribeOperatingSystems with the addition of the ability to pass a context and additional request options. See DescribeOperatingSystems for details on how to use this API operation. The context must be non-nil and will be used for request cancellation. If the context is nil a panic will occur. In the future the SDK may create sub-contexts for http.Requests. See https://golang.org/pkg/context/ for more information on using Contexts.
func (c *OpsWorks) DescribeOperatingSystemsWithContext(ctx aws.Context, input *DescribeOperatingSystemsInput, opts ...request.Option) (*DescribeOperatingSystemsOutput, error) { req, out := c.DescribeOperatingSystemsRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() }
func (c *FSx) DescribeFileSystemsWithContext(ctx aws.Context, input *DescribeFileSystemsInput, opts ...request.Option) (*DescribeFileSystemsOutput, error) { req, out := c.DescribeFileSystemsRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() }
0.599251
aws/aws-sdk-go
service/opsworks/api.go
aws/aws-sdk-go
service/fsx/api.go
Apache-2.0
go
DescribeOperatingSystemsWithContext is the same as DescribeOperatingSystems with the addition of the ability to pass a context and additional request options. See DescribeOperatingSystems for details on how to use this API operation. The context must be non-nil and will be used for request cancellation. If the context is nil a panic will occur. In the future the SDK may create sub-contexts for http.Requests. See https://golang.org/pkg/context/ for more information on using Contexts.
func (c *OpsWorks) DescribeOperatingSystemsWithContext(ctx aws.Context, input *DescribeOperatingSystemsInput, opts ...request.Option) (*DescribeOperatingSystemsOutput, error) { req, out := c.DescribeOperatingSystemsRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() }
func (c *SecurityHub) DescribeStandardsWithContext(ctx aws.Context, input *DescribeStandardsInput, opts ...request.Option) (*DescribeStandardsOutput, error) { req, out := c.DescribeStandardsRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() }
0.59425
aws/aws-sdk-go
service/opsworks/api.go
aws/aws-sdk-go
service/securityhub/api.go
Apache-2.0
go
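The WithContext variants above all follow the same pattern: the caller supplies a non-nil context plus optional request options. A minimal sketch for the OpsWorks case follows; passing an empty DescribeOperatingSystemsInput and simply printing the output are illustration choices, not something shown in the records.

package main

import (
	"context"
	"fmt"
	"time"

	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/opsworks"
)

func main() {
	svc := opsworks.New(session.Must(session.NewSession()))

	// The context must be non-nil; per the docstring a nil context panics.
	ctx, cancel := context.WithTimeout(context.Background(), 15*time.Second)
	defer cancel()

	// Passing an empty input here is an assumption for illustration.
	out, err := svc.DescribeOperatingSystemsWithContext(ctx, &opsworks.DescribeOperatingSystemsInput{})
	if err != nil {
		fmt.Println("DescribeOperatingSystems failed:", err)
		return
	}
	fmt.Println(out)
}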
WriteTempDockerfile writes a Dockerfile stream to a temporary file with a name specified by DefaultDockerfileName and returns the path to the temporary directory containing the Dockerfile.
func WriteTempDockerfile(rc io.ReadCloser) (dockerfileDir string, err error) { // err is a named return value, due to the defer call below. dockerfileDir, err = ioutil.TempDir("", "docker-build-tempdockerfile-") if err != nil { return "", errors.Errorf("unable to create temporary context directory: %v", err) } defer func() { if err != nil { os.RemoveAll(dockerfileDir) } }() f, err := os.Create(filepath.Join(dockerfileDir, DefaultDockerfileName)) if err != nil { return "", err } defer f.Close() if _, err := io.Copy(f, rc); err != nil { return "", err } return dockerfileDir, rc.Close() }
func ExportDockerImage(dockerImageName string) (string, error) { tempImageFile, err := os.CreateTemp("", "docker-image-*.tar") if err != nil { slog.Error(fmt.Sprintf("Failed to create temporary file: %s", err)) return "", err } err = tempImageFile.Close() if err != nil { _ = os.RemoveAll(tempImageFile.Name()) return "", err } // Check if image exists locally, if not, pull from the cloud. slog.Info(fmt.Sprintf("Checking if docker image (%q) exists locally...", dockerImageName)) cmd := exec.Command("docker", "images", "-q", dockerImageName) output, err := cmd.Output() if err != nil || string(output) == "" { slog.Info(fmt.Sprintf("Image not found locally, pulling docker image (%q)...", dockerImageName)) err = runCommandLogError("docker", "pull", "-q", dockerImageName) if err != nil { _ = os.RemoveAll(tempImageFile.Name()) return "", fmt.Errorf("failed to pull container image: %w", err) } } slog.Info(fmt.Sprintf("Saving docker image (%q) to temporary file...", dockerImageName)) err = runCommandLogError("docker", "save", "-o", tempImageFile.Name(), dockerImageName) if err != nil { _ = os.RemoveAll(tempImageFile.Name()) return "", err } return tempImageFile.Name(), nil }
0.575134
genuinetools/binctr
vendor/github.com/docker/docker-ce/components/cli/cli/command/image/build/context.go
google/osv-scanner
pkg/osvscanner/internal/imagehelpers/imagehelpers.go
Apache-2.0
go
WriteTempDockerfile writes a Dockerfile stream to a temporary file with a name specified by DefaultDockerfileName and returns the path to the temporary directory containing the Dockerfile.
func WriteTempDockerfile(rc io.ReadCloser) (dockerfileDir string, err error) { // err is a named return value, due to the defer call below. dockerfileDir, err = ioutil.TempDir("", "docker-build-tempdockerfile-") if err != nil { return "", errors.Errorf("unable to create temporary context directory: %v", err) } defer func() { if err != nil { os.RemoveAll(dockerfileDir) } }() f, err := os.Create(filepath.Join(dockerfileDir, DefaultDockerfileName)) if err != nil { return "", err } defer f.Close() if _, err := io.Copy(f, rc); err != nil { return "", err } return dockerfileDir, rc.Close() }
func replaceDockerfileForContentTrust(ctx context.Context, inputTarStream io.ReadCloser, dockerfileName string, translator translatorFunc, resolvedTags *[]*resolvedTag) io.ReadCloser { pipeReader, pipeWriter := io.Pipe() go func() { tarReader := tar.NewReader(inputTarStream) tarWriter := tar.NewWriter(pipeWriter) defer inputTarStream.Close() for { hdr, err := tarReader.Next() if err == io.EOF { // Signals end of archive. tarWriter.Close() pipeWriter.Close() return } if err != nil { pipeWriter.CloseWithError(err) return } content := io.Reader(tarReader) if hdr.Name == dockerfileName { // This entry is the Dockerfile. Since the tar archive was // generated from a directory on the local filesystem, the // Dockerfile will only appear once in the archive. var newDockerfile []byte newDockerfile, *resolvedTags, err = rewriteDockerfileFromForContentTrust(ctx, content, translator) if err != nil { pipeWriter.CloseWithError(err) return } hdr.Size = int64(len(newDockerfile)) content = bytes.NewBuffer(newDockerfile) } if err := tarWriter.WriteHeader(hdr); err != nil { pipeWriter.CloseWithError(err) return } if _, err := io.Copy(tarWriter, content); err != nil { pipeWriter.CloseWithError(err) return } } }() return pipeReader }
0.552019
genuinetools/binctr
vendor/github.com/docker/docker-ce/components/cli/cli/command/image/build/context.go
genuinetools/binctr
vendor/github.com/docker/docker-ce/components/cli/cli/command/image/build.go
MIT
go
WriteTempDockerfile writes a Dockerfile stream to a temporary file with a name specified by DefaultDockerfileName and returns the path to the temporary directory containing the Dockerfile.
func WriteTempDockerfile(rc io.ReadCloser) (dockerfileDir string, err error) { // err is a named return value, due to the defer call below. dockerfileDir, err = ioutil.TempDir("", "docker-build-tempdockerfile-") if err != nil { return "", errors.Errorf("unable to create temporary context directory: %v", err) } defer func() { if err != nil { os.RemoveAll(dockerfileDir) } }() f, err := os.Create(filepath.Join(dockerfileDir, DefaultDockerfileName)) if err != nil { return "", err } defer f.Close() if _, err := io.Copy(f, rc); err != nil { return "", err } return dockerfileDir, rc.Close() }
func createTempFileFromBytes( //nolint:unused // Note: this might come in handy for manual file operations such as too long CRDs. content []byte, tempDirectoryOverride string, tempFileNamePatternOverride string, fileModeOverride fs.FileMode, ) (string, error) { if fileModeOverride == 0 { fileModeOverride = 0o777 } tempFile, err := os.CreateTemp(tempDirectoryOverride, tempFileNamePatternOverride) if err != nil { return "", errors.WrapIfWithDetails( err, "creating temporary file failed", "content", string(content), "tempDirectoryOverride", tempDirectoryOverride, "tempFileNamePatternOverride", tempFileNamePatternOverride, ) } err = os.WriteFile(tempFile.Name(), content, fileModeOverride) if err != nil { return "", errors.WrapIfWithDetails( err, "writing content to temporary file failed", "fileName", tempFile.Name(), "content", string(content), "fileModeOverride", fileModeOverride, ) } err = tempFile.Close() if err != nil { return "", errors.WrapIfWithDetails(err, "closing temporary file failed", "tempPath", tempFile.Name()) } return tempFile.Name(), nil }
0.546249
genuinetools/binctr
vendor/github.com/docker/docker-ce/components/cli/cli/command/image/build/context.go
banzaicloud/koperator
tests/e2e/file.go
Apache-2.0
go
WriteTempDockerfile writes a Dockerfile stream to a temporary file with a name specified by DefaultDockerfileName and returns the path to the temporary directory containing the Dockerfile.
func WriteTempDockerfile(rc io.ReadCloser) (dockerfileDir string, err error) { // err is a named return value, due to the defer call below. dockerfileDir, err = ioutil.TempDir("", "docker-build-tempdockerfile-") if err != nil { return "", errors.Errorf("unable to create temporary context directory: %v", err) } defer func() { if err != nil { os.RemoveAll(dockerfileDir) } }() f, err := os.Create(filepath.Join(dockerfileDir, DefaultDockerfileName)) if err != nil { return "", err } defer f.Close() if _, err := io.Copy(f, rc); err != nil { return "", err } return dockerfileDir, rc.Close() }
func GetContextFromLocalDir(localDir, dockerfileName string) (string, string, error) { localDir, err := ResolveAndValidateContextPath(localDir) if err != nil { return "", "", err } // When using a local context directory, and the Dockerfile is specified // with the `-f/--file` option then it is considered relative to the // current directory and not the context directory. if dockerfileName != "" && dockerfileName != "-" { if dockerfileName, err = filepath.Abs(dockerfileName); err != nil { return "", "", errors.Errorf("unable to get absolute path to Dockerfile: %v", err) } } relDockerfile, err := getDockerfileRelPath(localDir, dockerfileName) return localDir, relDockerfile, err }
0.54618
genuinetools/binctr
vendor/github.com/docker/docker-ce/components/cli/cli/command/image/build/context.go
genuinetools/binctr
vendor/github.com/docker/docker-ce/components/cli/cli/command/image/build/context.go
MIT
go
WriteTempDockerfile writes a Dockerfile stream to a temporary file with a name specified by DefaultDockerfileName and returns the path to the temporary directory containing the Dockerfile.
func WriteTempDockerfile(rc io.ReadCloser) (dockerfileDir string, err error) { // err is a named return value, due to the defer call below. dockerfileDir, err = ioutil.TempDir("", "docker-build-tempdockerfile-") if err != nil { return "", errors.Errorf("unable to create temporary context directory: %v", err) } defer func() { if err != nil { os.RemoveAll(dockerfileDir) } }() f, err := os.Create(filepath.Join(dockerfileDir, DefaultDockerfileName)) if err != nil { return "", err } defer f.Close() if _, err := io.Copy(f, rc); err != nil { return "", err } return dockerfileDir, rc.Close() }
func buildDockerImage(imageDir, imageName string) { if dockDir == "" { panic("dockDir should be set before calling buildDockerImage") } cmd := exec.Command("docker", "build", "-t", imageName, ".") cmd.Dir = filepath.Join(dockDir, imageDir) cmd.Stdout = os.Stdout cmd.Stderr = os.Stderr if err := cmd.Run(); err != nil { log.Fatalf("Error building docker image %v: %v", imageName, err) } }
0.540523
genuinetools/binctr
vendor/github.com/docker/docker-ce/components/cli/cli/command/image/build/context.go
perkeep/perkeep
misc/docker/dock.go
Apache-2.0
go
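The positive snippet in the records above takes a Dockerfile as an io.ReadCloser and returns a temporary directory that the caller must remove. A usage sketch follows; the import path is assumed to be the upstream docker CLI location of the vendored package shown in the records.

package main

import (
	"fmt"
	"io"
	"os"
	"strings"

	// Assumed upstream location of the vendored build package shown above.
	"github.com/docker/cli/cli/command/image/build"
)

func main() {
	dockerfile := "FROM alpine:3.19\nRUN echo hello\n"
	rc := io.NopCloser(strings.NewReader(dockerfile))

	dir, err := build.WriteTempDockerfile(rc)
	if err != nil {
		fmt.Println("writing temp Dockerfile failed:", err)
		return
	}
	// On success the caller owns the temporary directory and must remove it.
	defer os.RemoveAll(dir)

	fmt.Println("Dockerfile written under", dir)
}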
PutConfigurationSetVdmOptionsWithContext is the same as PutConfigurationSetVdmOptions with the addition of the ability to pass a context and additional request options. See PutConfigurationSetVdmOptions for details on how to use this API operation. The context must be non-nil and will be used for request cancellation. If the context is nil a panic will occur. In the future the SDK may create sub-contexts for http.Requests. See https://golang.org/pkg/context/ for more information on using Contexts.
func (c *SESV2) PutConfigurationSetVdmOptionsWithContext(ctx aws.Context, input *PutConfigurationSetVdmOptionsInput, opts ...request.Option) (*PutConfigurationSetVdmOptionsOutput, error) { req, out := c.PutConfigurationSetVdmOptionsRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() }
func (c *SESV2) PutConfigurationSetVdmOptionsRequest(input *PutConfigurationSetVdmOptionsInput) (req *request.Request, output *PutConfigurationSetVdmOptionsOutput) { op := &request.Operation{ Name: opPutConfigurationSetVdmOptions, HTTPMethod: "PUT", HTTPPath: "/v2/email/configuration-sets/{ConfigurationSetName}/vdm-options", } if input == nil { input = &PutConfigurationSetVdmOptionsInput{} } output = &PutConfigurationSetVdmOptionsOutput{} req = c.newRequest(op, input, output) req.Handlers.Unmarshal.Swap(restjson.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return }
0.828283
aws/aws-sdk-go
service/sesv2/api.go
aws/aws-sdk-go
service/sesv2/api.go
Apache-2.0
go
PutConfigurationSetVdmOptionsWithContext is the same as PutConfigurationSetVdmOptions with the addition of the ability to pass a context and additional request options. See PutConfigurationSetVdmOptions for details on how to use this API operation. The context must be non-nil and will be used for request cancellation. If the context is nil a panic will occur. In the future the SDK may create sub-contexts for http.Requests. See https://golang.org/pkg/context/ for more information on using Contexts.
func (c *SESV2) PutConfigurationSetVdmOptionsWithContext(ctx aws.Context, input *PutConfigurationSetVdmOptionsInput, opts ...request.Option) (*PutConfigurationSetVdmOptionsOutput, error) { req, out := c.PutConfigurationSetVdmOptionsRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() }
func (c *PinpointEmail) PutConfigurationSetSendingOptionsWithContext(ctx aws.Context, input *PutConfigurationSetSendingOptionsInput, opts ...request.Option) (*PutConfigurationSetSendingOptionsOutput, error) { req, out := c.PutConfigurationSetSendingOptionsRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() }
0.770309
aws/aws-sdk-go
service/sesv2/api.go
aws/aws-sdk-go
service/pinpointemail/api.go
Apache-2.0
go
PutConfigurationSetVdmOptionsWithContext is the same as PutConfigurationSetVdmOptions with the addition of the ability to pass a context and additional request options. See PutConfigurationSetVdmOptions for details on how to use this API operation. The context must be non-nil and will be used for request cancellation. If the context is nil a panic will occur. In the future the SDK may create sub-contexts for http.Requests. See https://golang.org/pkg/context/ for more information on using Contexts.
func (c *SESV2) PutConfigurationSetVdmOptionsWithContext(ctx aws.Context, input *PutConfigurationSetVdmOptionsInput, opts ...request.Option) (*PutConfigurationSetVdmOptionsOutput, error) { req, out := c.PutConfigurationSetVdmOptionsRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() }
func (c *SESV2) PutConfigurationSetSuppressionOptionsWithContext(ctx aws.Context, input *PutConfigurationSetSuppressionOptionsInput, opts ...request.Option) (*PutConfigurationSetSuppressionOptionsOutput, error) { req, out := c.PutConfigurationSetSuppressionOptionsRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() }
0.755294
aws/aws-sdk-go
service/sesv2/api.go
aws/aws-sdk-go
service/sesv2/api.go
Apache-2.0
go
PutConfigurationSetVdmOptionsWithContext is the same as PutConfigurationSetVdmOptions with the addition of the ability to pass a context and additional request options. See PutConfigurationSetVdmOptions for details on how to use this API operation. The context must be non-nil and will be used for request cancellation. If the context is nil a panic will occur. In the future the SDK may create sub-contexts for http.Requests. See https://golang.org/pkg/context/ for more information on using Contexts.
func (c *SESV2) PutConfigurationSetVdmOptionsWithContext(ctx aws.Context, input *PutConfigurationSetVdmOptionsInput, opts ...request.Option) (*PutConfigurationSetVdmOptionsOutput, error) { req, out := c.PutConfigurationSetVdmOptionsRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() }
func (c *PinpointEmail) PutConfigurationSetTrackingOptionsWithContext(ctx aws.Context, input *PutConfigurationSetTrackingOptionsInput, opts ...request.Option) (*PutConfigurationSetTrackingOptionsOutput, error) { req, out := c.PutConfigurationSetTrackingOptionsRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() }
0.752248
aws/aws-sdk-go
service/sesv2/api.go
aws/aws-sdk-go
service/pinpointemail/api.go
Apache-2.0
go
PutConfigurationSetVdmOptionsWithContext is the same as PutConfigurationSetVdmOptions with the addition of the ability to pass a context and additional request options. See PutConfigurationSetVdmOptions for details on how to use this API operation. The context must be non-nil and will be used for request cancellation. If the context is nil a panic will occur. In the future the SDK may create sub-contexts for http.Requests. See https://golang.org/pkg/context/ for more information on using Contexts.
func (c *SESV2) PutConfigurationSetVdmOptionsWithContext(ctx aws.Context, input *PutConfigurationSetVdmOptionsInput, opts ...request.Option) (*PutConfigurationSetVdmOptionsOutput, error) { req, out := c.PutConfigurationSetVdmOptionsRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() }
func (c *PinpointEmail) PutConfigurationSetDeliveryOptionsWithContext(ctx aws.Context, input *PutConfigurationSetDeliveryOptionsInput, opts ...request.Option) (*PutConfigurationSetDeliveryOptionsOutput, error) { req, out := c.PutConfigurationSetDeliveryOptionsRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() }
0.748403
aws/aws-sdk-go
service/sesv2/api.go
aws/aws-sdk-go
service/pinpointemail/api.go
Apache-2.0
go
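A context-aware call of the SESV2 operation above might look like the sketch below. The configuration set name is a placeholder, and the VdmOptions/DashboardOptions field names and the "ENABLED" value are assumptions about the generated types rather than details taken from the records.

package main

import (
	"context"
	"fmt"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/sesv2"
)

func main() {
	svc := sesv2.New(session.Must(session.NewSession()))

	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	// Field names below are assumed; "example-config-set" is a placeholder.
	input := &sesv2.PutConfigurationSetVdmOptionsInput{
		ConfigurationSetName: aws.String("example-config-set"),
		VdmOptions: &sesv2.VdmOptions{
			DashboardOptions: &sesv2.DashboardOptions{
				EngagementMetrics: aws.String("ENABLED"),
			},
		},
	}
	if _, err := svc.PutConfigurationSetVdmOptionsWithContext(ctx, input); err != nil {
		fmt.Println("PutConfigurationSetVdmOptions failed:", err)
		return
	}
	fmt.Println("VDM options updated")
}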
PutAuditEvents API operation for AWS CloudTrail Data Service. Ingests your application events into CloudTrail Lake. A required parameter, auditEvents, accepts the JSON records (also called payload) of events that you want CloudTrail to ingest. You can add up to 100 of these events (or up to 1 MB) per PutAuditEvents request. Returns awserr.Error for service API and SDK errors. Use runtime type assertions with awserr.Error's Code and Message methods to get detailed information about the error. See the AWS API reference guide for AWS CloudTrail Data Service's API operation PutAuditEvents for usage and error information. Returned Error Types: - ChannelInsufficientPermission The caller's account ID must be the same as the channel owner's account ID. - ChannelNotFound The channel could not be found. - InvalidChannelARN The specified channel ARN is not a valid channel ARN. - ChannelUnsupportedSchema The schema type of the event is not supported. - DuplicatedAuditEventId Two or more entries in the request have the same event ID. - UnsupportedOperationException The operation requested is not supported in this region or account. See also, https://docs.aws.amazon.com/goto/WebAPI/cloudtrail-data-2021-08-11/PutAuditEvents
func (c *CloudTrailData) PutAuditEvents(input *PutAuditEventsInput) (*PutAuditEventsOutput, error) { req, out := c.PutAuditEventsRequest(input) return out, req.Send() }
func (c *CloudTrailData) PutAuditEventsRequest(input *PutAuditEventsInput) (req *request.Request, output *PutAuditEventsOutput) { op := &request.Operation{ Name: opPutAuditEvents, HTTPMethod: "POST", HTTPPath: "/PutAuditEvents", } if input == nil { input = &PutAuditEventsInput{} } output = &PutAuditEventsOutput{} req = c.newRequest(op, input, output) return }
0.795256
aws/aws-sdk-go
service/cloudtraildata/api.go
aws/aws-sdk-go
service/cloudtraildata/api.go
Apache-2.0
go
PutAuditEvents API operation for AWS CloudTrail Data Service. Ingests your application events into CloudTrail Lake. A required parameter, auditEvents, accepts the JSON records (also called payload) of events that you want CloudTrail to ingest. You can add up to 100 of these events (or up to 1 MB) per PutAuditEvents request. Returns awserr.Error for service API and SDK errors. Use runtime type assertions with awserr.Error's Code and Message methods to get detailed information about the error. See the AWS API reference guide for AWS CloudTrail Data Service's API operation PutAuditEvents for usage and error information. Returned Error Types: - ChannelInsufficientPermission The caller's account ID must be the same as the channel owner's account ID. - ChannelNotFound The channel could not be found. - InvalidChannelARN The specified channel ARN is not a valid channel ARN. - ChannelUnsupportedSchema The schema type of the event is not supported. - DuplicatedAuditEventId Two or more entries in the request have the same event ID. - UnsupportedOperationException The operation requested is not supported in this region or account. See also, https://docs.aws.amazon.com/goto/WebAPI/cloudtrail-data-2021-08-11/PutAuditEvents
func (c *CloudTrailData) PutAuditEvents(input *PutAuditEventsInput) (*PutAuditEventsOutput, error) { req, out := c.PutAuditEventsRequest(input) return out, req.Send() }
func (c *CloudWatchEvents) PutEvents(input *PutEventsInput) (*PutEventsOutput, error) { req, out := c.PutEventsRequest(input) return out, req.Send() }
0.73388
aws/aws-sdk-go
service/cloudtraildata/api.go
aws/aws-sdk-go
service/cloudwatchevents/api.go
Apache-2.0
go
PutAuditEvents API operation for AWS CloudTrail Data Service. Ingests your application events into CloudTrail Lake. A required parameter, auditEvents, accepts the JSON records (also called payload) of events that you want CloudTrail to ingest. You can add up to 100 of these events (or up to 1 MB) per PutAuditEvents request. Returns awserr.Error for service API and SDK errors. Use runtime type assertions with awserr.Error's Code and Message methods to get detailed information about the error. See the AWS API reference guide for AWS CloudTrail Data Service's API operation PutAuditEvents for usage and error information. Returned Error Types: - ChannelInsufficientPermission The caller's account ID must be the same as the channel owner's account ID. - ChannelNotFound The channel could not be found. - InvalidChannelARN The specified channel ARN is not a valid channel ARN. - ChannelUnsupportedSchema The schema type of the event is not supported. - DuplicatedAuditEventId Two or more entries in the request have the same event ID. - UnsupportedOperationException The operation requested is not supported in this region or account. See also, https://docs.aws.amazon.com/goto/WebAPI/cloudtrail-data-2021-08-11/PutAuditEvents
func (c *CloudTrailData) PutAuditEvents(input *PutAuditEventsInput) (*PutAuditEventsOutput, error) { req, out := c.PutAuditEventsRequest(input) return out, req.Send() }
func (c *MobileAnalytics) PutEvents(input *PutEventsInput) (*PutEventsOutput, error) { req, out := c.PutEventsRequest(input) return out, req.Send() }
0.72665
aws/aws-sdk-go
service/cloudtraildata/api.go
aws/aws-sdk-go
service/mobileanalytics/api.go
Apache-2.0
go
PutAuditEvents API operation for AWS CloudTrail Data Service. Ingests your application events into CloudTrail Lake. A required parameter, auditEvents, accepts the JSON records (also called payload) of events that you want CloudTrail to ingest. You can add up to 100 of these events (or up to 1 MB) per PutAuditEvents request. Returns awserr.Error for service API and SDK errors. Use runtime type assertions with awserr.Error's Code and Message methods to get detailed information about the error. See the AWS API reference guide for AWS CloudTrail Data Service's API operation PutAuditEvents for usage and error information. Returned Error Types: - ChannelInsufficientPermission The caller's account ID must be the same as the channel owner's account ID. - ChannelNotFound The channel could not be found. - InvalidChannelARN The specified channel ARN is not a valid channel ARN. - ChannelUnsupportedSchema The schema type of the event is not supported. - DuplicatedAuditEventId Two or more entries in the request have the same event ID. - UnsupportedOperationException The operation requested is not supported in this region or account. See also, https://docs.aws.amazon.com/goto/WebAPI/cloudtrail-data-2021-08-11/PutAuditEvents
func (c *CloudTrailData) PutAuditEvents(input *PutAuditEventsInput) (*PutAuditEventsOutput, error) { req, out := c.PutAuditEventsRequest(input) return out, req.Send() }
func (c *CloudWatchEvents) PutPartnerEvents(input *PutPartnerEventsInput) (*PutPartnerEventsOutput, error) { req, out := c.PutPartnerEventsRequest(input) return out, req.Send() }
0.72177
aws/aws-sdk-go
service/cloudtraildata/api.go
aws/aws-sdk-go
service/cloudwatchevents/api.go
Apache-2.0
go
PutAuditEvents API operation for AWS CloudTrail Data Service. Ingests your application events into CloudTrail Lake. A required parameter, auditEvents, accepts the JSON records (also called payload) of events that you want CloudTrail to ingest. You can add up to 100 of these events (or up to 1 MB) per PutAuditEvents request. Returns awserr.Error for service API and SDK errors. Use runtime type assertions with awserr.Error's Code and Message methods to get detailed information about the error. See the AWS API reference guide for AWS CloudTrail Data Service's API operation PutAuditEvents for usage and error information. Returned Error Types: - ChannelInsufficientPermission The caller's account ID must be the same as the channel owner's account ID. - ChannelNotFound The channel could not be found. - InvalidChannelARN The specified channel ARN is not a valid channel ARN. - ChannelUnsupportedSchema The schema type of the event is not supported. - DuplicatedAuditEventId Two or more entries in the request have the same event ID. - UnsupportedOperationException The operation requested is not supported in this region or account. See also, https://docs.aws.amazon.com/goto/WebAPI/cloudtrail-data-2021-08-11/PutAuditEvents
func (c *CloudTrailData) PutAuditEvents(input *PutAuditEventsInput) (*PutAuditEventsOutput, error) { req, out := c.PutAuditEventsRequest(input) return out, req.Send() }
func (c *CloudWatchLogs) PutLogEvents(input *PutLogEventsInput) (*PutLogEventsOutput, error) { req, out := c.PutLogEventsRequest(input) return out, req.Send() }
0.711528
aws/aws-sdk-go
service/cloudtraildata/api.go
aws/aws-sdk-go
service/cloudwatchlogs/api.go
Apache-2.0
go
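A usage sketch for the CloudTrail Lake ingestion call above follows. The channel ARN, event ID, and JSON payload are placeholders, and the ChannelArn/AuditEvents/Id/EventData field names are assumptions about the generated input shape rather than details taken from the records.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/cloudtraildata"
)

func main() {
	sess := session.Must(session.NewSession())
	svc := cloudtraildata.New(sess)

	// The ARN, event ID, and payload below are placeholders, not real values.
	out, err := svc.PutAuditEvents(&cloudtraildata.PutAuditEventsInput{
		ChannelArn: aws.String("arn:aws:cloudtrail:us-east-1:123456789012:channel/EXAMPLE"),
		AuditEvents: []*cloudtraildata.AuditEvent{
			{
				Id:        aws.String("event-1"),
				EventData: aws.String(`{"version":"1.0","userIdentity":{"type":"CustomUser"}}`),
			},
		},
	})
	if err != nil {
		fmt.Println("PutAuditEvents failed:", err)
		return
	}
	fmt.Println(out)
}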
WriteBackupStreamFromTarFile writes a Win32 backup stream from the current tar file. Since this function may process multiple tar file entries in order to collect all the alternate data streams for the file, it returns the next tar file that was not processed, or io.EOF if there are no more.
func WriteBackupStreamFromTarFile(w io.Writer, t *tar.Reader, hdr *tar.Header) (*tar.Header, error) { bw := winio.NewBackupStreamWriter(w) sd, err := SecurityDescriptorFromTarHeader(hdr) if err != nil { return nil, err } if len(sd) != 0 { bhdr := winio.BackupHeader{ Id: winio.BackupSecurity, Size: int64(len(sd)), } err := bw.WriteHeader(&bhdr) if err != nil { return nil, err } _, err = bw.Write(sd) if err != nil { return nil, err } } eadata, err := ExtendedAttributesFromTarHeader(hdr) if err != nil { return nil, err } if len(eadata) != 0 { bhdr := winio.BackupHeader{ Id: winio.BackupEaData, Size: int64(len(eadata)), } err = bw.WriteHeader(&bhdr) if err != nil { return nil, err } _, err = bw.Write(eadata) if err != nil { return nil, err } } if hdr.Typeflag == tar.TypeSymlink { reparse := EncodeReparsePointFromTarHeader(hdr) bhdr := winio.BackupHeader{ Id: winio.BackupReparseData, Size: int64(len(reparse)), } err := bw.WriteHeader(&bhdr) if err != nil { return nil, err } _, err = bw.Write(reparse) if err != nil { return nil, err } } if hdr.Typeflag == tar.TypeReg { bhdr := winio.BackupHeader{ Id: winio.BackupData, Size: hdr.Size, } err := bw.WriteHeader(&bhdr) if err != nil { return nil, err } _, err = io.Copy(bw, t) if err != nil { return nil, err } } // Copy all the alternate data streams and return the next non-ADS header. for { ahdr, err := t.Next() if err != nil { return nil, err } if ahdr.Typeflag != tar.TypeReg || !strings.HasPrefix(ahdr.Name, hdr.Name+":") { return ahdr, nil } bhdr := winio.BackupHeader{ Id: winio.BackupAlternateData, Size: ahdr.Size, Name: ahdr.Name[len(hdr.Name):] + ":$DATA", } err = bw.WriteHeader(&bhdr) if err != nil { return nil, err } _, err = io.Copy(bw, t) if err != nil { return nil, err } } }
func writeBackupStreamFromTarFile(w io.Writer, t *tar.Reader, hdr *tar.Header) (*tar.Header, error) { bw := winio.NewBackupStreamWriter(w) var sd []byte var err error // Maintaining old SDDL-based behavior for backward compatibility. All new tar headers written // by this library will have raw binary for the security descriptor. if sddl, ok := hdr.PAXRecords[hdrSecurityDescriptor]; ok { sd, err = winio.SddlToSecurityDescriptor(sddl) if err != nil { return nil, err } } if sdraw, ok := hdr.PAXRecords[hdrRawSecurityDescriptor]; ok { sd, err = base64.StdEncoding.DecodeString(sdraw) if err != nil { return nil, err } } if len(sd) != 0 { bhdr := winio.BackupHeader{ Id: winio.BackupSecurity, Size: int64(len(sd)), } err := bw.WriteHeader(&bhdr) if err != nil { return nil, err } _, err = bw.Write(sd) if err != nil { return nil, err } } var eas []winio.ExtendedAttribute for k, v := range hdr.PAXRecords { if !strings.HasPrefix(k, hdrEaPrefix) { continue } data, err := base64.StdEncoding.DecodeString(v) if err != nil { return nil, err } eas = append(eas, winio.ExtendedAttribute{ Name: k[len(hdrEaPrefix):], Value: data, }) } if len(eas) != 0 { eadata, err := winio.EncodeExtendedAttributes(eas) if err != nil { return nil, err } bhdr := winio.BackupHeader{ Id: winio.BackupEaData, Size: int64(len(eadata)), } err = bw.WriteHeader(&bhdr) if err != nil { return nil, err } _, err = bw.Write(eadata) if err != nil { return nil, err } } if hdr.Typeflag == tar.TypeSymlink { _, isMountPoint := hdr.PAXRecords[hdrMountPoint] rp := winio.ReparsePoint{ Target: filepath.FromSlash(hdr.Linkname), IsMountPoint: isMountPoint, } reparse := winio.EncodeReparsePoint(&rp) bhdr := winio.BackupHeader{ Id: winio.BackupReparseData, Size: int64(len(reparse)), } err := bw.WriteHeader(&bhdr) if err != nil { return nil, err } _, err = bw.Write(reparse) if err != nil { return nil, err } } buf := bufPool.Get().(*[]byte) defer bufPool.Put(buf) if hdr.Typeflag == tar.TypeReg || hdr.Typeflag == tar.TypeRegA { bhdr := winio.BackupHeader{ Id: winio.BackupData, Size: hdr.Size, } err := bw.WriteHeader(&bhdr) if err != nil { return nil, err } _, err = io.CopyBuffer(bw, t, *buf) if err != nil { return nil, err } } // Copy all the alternate data streams and return the next non-ADS header. for { ahdr, err := t.Next() if err != nil { return nil, err } if ahdr.Typeflag != tar.TypeReg || !strings.HasPrefix(ahdr.Name, hdr.Name+":") { return ahdr, nil } bhdr := winio.BackupHeader{ Id: winio.BackupAlternateData, Size: ahdr.Size, Name: ahdr.Name[len(hdr.Name):] + ":$DATA", } err = bw.WriteHeader(&bhdr) if err != nil { return nil, err } _, err = io.CopyBuffer(bw, t, *buf) if err != nil { return nil, err } } }
0.988644
microsoft/go-winio
backuptar/tar.go
genuinetools/binctr
vendor/github.com/containerd/containerd/archive/tar_windows.go
MIT
go
WriteBackupStreamFromTarFile writes a Win32 backup stream from the current tar file. Since this function may process multiple tar file entries in order to collect all the alternate data streams for the file, it returns the next tar file that was not processed, or io.EOF if there are no more.
func WriteBackupStreamFromTarFile(w io.Writer, t *tar.Reader, hdr *tar.Header) (*tar.Header, error) { bw := winio.NewBackupStreamWriter(w) sd, err := SecurityDescriptorFromTarHeader(hdr) if err != nil { return nil, err } if len(sd) != 0 { bhdr := winio.BackupHeader{ Id: winio.BackupSecurity, Size: int64(len(sd)), } err := bw.WriteHeader(&bhdr) if err != nil { return nil, err } _, err = bw.Write(sd) if err != nil { return nil, err } } eadata, err := ExtendedAttributesFromTarHeader(hdr) if err != nil { return nil, err } if len(eadata) != 0 { bhdr := winio.BackupHeader{ Id: winio.BackupEaData, Size: int64(len(eadata)), } err = bw.WriteHeader(&bhdr) if err != nil { return nil, err } _, err = bw.Write(eadata) if err != nil { return nil, err } } if hdr.Typeflag == tar.TypeSymlink { reparse := EncodeReparsePointFromTarHeader(hdr) bhdr := winio.BackupHeader{ Id: winio.BackupReparseData, Size: int64(len(reparse)), } err := bw.WriteHeader(&bhdr) if err != nil { return nil, err } _, err = bw.Write(reparse) if err != nil { return nil, err } } if hdr.Typeflag == tar.TypeReg { bhdr := winio.BackupHeader{ Id: winio.BackupData, Size: hdr.Size, } err := bw.WriteHeader(&bhdr) if err != nil { return nil, err } _, err = io.Copy(bw, t) if err != nil { return nil, err } } // Copy all the alternate data streams and return the next non-ADS header. for { ahdr, err := t.Next() if err != nil { return nil, err } if ahdr.Typeflag != tar.TypeReg || !strings.HasPrefix(ahdr.Name, hdr.Name+":") { return ahdr, nil } bhdr := winio.BackupHeader{ Id: winio.BackupAlternateData, Size: ahdr.Size, Name: ahdr.Name[len(hdr.Name):] + ":$DATA", } err = bw.WriteHeader(&bhdr) if err != nil { return nil, err } _, err = io.Copy(bw, t) if err != nil { return nil, err } } }
func WriteTarFileFromBackupStream(t *tar.Writer, r io.Reader, name string, size int64, fileInfo *winio.FileBasicInfo) error { name = filepath.ToSlash(name) hdr := BasicInfoHeader(name, size, fileInfo) // If r can be seeked, then this function is two-pass: pass 1 collects the // tar header data, and pass 2 copies the data stream. If r cannot be // seeked, then some header data (in particular EAs) will be silently lost. var ( restartPos int64 err error ) sr, readTwice := r.(io.Seeker) if readTwice { if restartPos, err = sr.Seek(0, io.SeekCurrent); err != nil { readTwice = false } } br := winio.NewBackupStreamReader(r) var dataHdr *winio.BackupHeader for dataHdr == nil { bhdr, err := br.Next() if err == io.EOF { break } if err != nil { return err } switch bhdr.Id { case winio.BackupData: hdr.Mode |= c_ISREG if !readTwice { dataHdr = bhdr } case winio.BackupSecurity: sd, err := ioutil.ReadAll(br) if err != nil { return err } hdr.Winheaders[hdrRawSecurityDescriptor] = base64.StdEncoding.EncodeToString(sd) case winio.BackupReparseData: hdr.Mode |= c_ISLNK hdr.Typeflag = tar.TypeSymlink reparseBuffer, err := ioutil.ReadAll(br) rp, err := winio.DecodeReparsePoint(reparseBuffer) if err != nil { return err } if rp.IsMountPoint { hdr.Winheaders[hdrMountPoint] = "1" } hdr.Linkname = rp.Target case winio.BackupEaData: eab, err := ioutil.ReadAll(br) if err != nil { return err } eas, err := winio.DecodeExtendedAttributes(eab) if err != nil { return err } for _, ea := range eas { // Use base64 encoding for the binary value. Note that there // is no way to encode the EA's flags, since their use doesn't // make any sense for persisted EAs. hdr.Winheaders[hdrEaPrefix+ea.Name] = base64.StdEncoding.EncodeToString(ea.Value) } case winio.BackupAlternateData, winio.BackupLink, winio.BackupPropertyData, winio.BackupObjectId, winio.BackupTxfsData: // ignore these streams default: return fmt.Errorf("%s: unknown stream ID %d", name, bhdr.Id) } } err = t.WriteHeader(hdr) if err != nil { return err } if readTwice { // Get back to the data stream. if _, err = sr.Seek(restartPos, io.SeekStart); err != nil { return err } for dataHdr == nil { bhdr, err := br.Next() if err == io.EOF { break } if err != nil { return err } if bhdr.Id == winio.BackupData { dataHdr = bhdr } } } if dataHdr != nil { // A data stream was found. Copy the data. if (dataHdr.Attributes & winio.StreamSparseAttributes) == 0 { if size != dataHdr.Size { return fmt.Errorf("%s: mismatch between file size %d and header size %d", name, size, dataHdr.Size) } _, err = io.Copy(t, br) if err != nil { return err } } else { err = copySparse(t, br) if err != nil { return err } } } // Look for streams after the data stream. The only ones we handle are alternate data streams. // Other streams may have metadata that could be serialized, but the tar header has already // been written. In practice, this means that we don't get EA or TXF metadata. for { bhdr, err := br.Next() if err == io.EOF { break } if err != nil { return err } switch bhdr.Id { case winio.BackupAlternateData: altName := bhdr.Name if strings.HasSuffix(altName, ":$DATA") { altName = altName[:len(altName)-len(":$DATA")] } if (bhdr.Attributes & winio.StreamSparseAttributes) == 0 { hdr = &tar.Header{ Name: name + altName, Mode: hdr.Mode, Typeflag: tar.TypeReg, Size: bhdr.Size, ModTime: hdr.ModTime, AccessTime: hdr.AccessTime, ChangeTime: hdr.ChangeTime, } err = t.WriteHeader(hdr) if err != nil { return err } _, err = io.Copy(t, br) if err != nil { return err } } else { // Unsupported for now, since the size of the alternate stream is not present // in the backup stream until after the data has been read. return errors.New("tar of sparse alternate data streams is unsupported") } case winio.BackupEaData, winio.BackupLink, winio.BackupPropertyData, winio.BackupObjectId, winio.BackupTxfsData: // ignore these streams default: return fmt.Errorf("%s: unknown stream ID %d after data", name, bhdr.Id) } } return nil }
0.816314
microsoft/go-winio
backuptar/tar.go
genuinetools/binctr
vendor/github.com/Microsoft/go-winio/backuptar/tar.go
MIT
go
WriteBackupStreamFromTarFile writes a Win32 backup stream from the current tar file. Since this function may process multiple tar file entries in order to collect all the alternate data streams for the file, it returns the next tar file that was not processed, or io.EOF if there are no more.
func WriteBackupStreamFromTarFile(w io.Writer, t *tar.Reader, hdr *tar.Header) (*tar.Header, error) { bw := winio.NewBackupStreamWriter(w) sd, err := SecurityDescriptorFromTarHeader(hdr) if err != nil { return nil, err } if len(sd) != 0 { bhdr := winio.BackupHeader{ Id: winio.BackupSecurity, Size: int64(len(sd)), } err := bw.WriteHeader(&bhdr) if err != nil { return nil, err } _, err = bw.Write(sd) if err != nil { return nil, err } } eadata, err := ExtendedAttributesFromTarHeader(hdr) if err != nil { return nil, err } if len(eadata) != 0 { bhdr := winio.BackupHeader{ Id: winio.BackupEaData, Size: int64(len(eadata)), } err = bw.WriteHeader(&bhdr) if err != nil { return nil, err } _, err = bw.Write(eadata) if err != nil { return nil, err } } if hdr.Typeflag == tar.TypeSymlink { reparse := EncodeReparsePointFromTarHeader(hdr) bhdr := winio.BackupHeader{ Id: winio.BackupReparseData, Size: int64(len(reparse)), } err := bw.WriteHeader(&bhdr) if err != nil { return nil, err } _, err = bw.Write(reparse) if err != nil { return nil, err } } if hdr.Typeflag == tar.TypeReg { bhdr := winio.BackupHeader{ Id: winio.BackupData, Size: hdr.Size, } err := bw.WriteHeader(&bhdr) if err != nil { return nil, err } _, err = io.Copy(bw, t) if err != nil { return nil, err } } // Copy all the alternate data streams and return the next non-ADS header. for { ahdr, err := t.Next() if err != nil { return nil, err } if ahdr.Typeflag != tar.TypeReg || !strings.HasPrefix(ahdr.Name, hdr.Name+":") { return ahdr, nil } bhdr := winio.BackupHeader{ Id: winio.BackupAlternateData, Size: ahdr.Size, Name: ahdr.Name[len(hdr.Name):] + ":$DATA", } err = bw.WriteHeader(&bhdr) if err != nil { return nil, err } _, err = io.Copy(bw, t) if err != nil { return nil, err } } }
func WriteTarFileFromBackupStream(t *tar.Writer, r io.Reader, name string, size int64, fileInfo *winio.FileBasicInfo) error { name = filepath.ToSlash(name) hdr := BasicInfoHeader(name, size, fileInfo) // If r can be seeked, then this function is two-pass: pass 1 collects the // tar header data, and pass 2 copies the data stream. If r cannot be // seeked, then some header data (in particular EAs) will be silently lost. var ( restartPos int64 err error ) sr, readTwice := r.(io.Seeker) if readTwice { if restartPos, err = sr.Seek(0, io.SeekCurrent); err != nil { readTwice = false } } br := winio.NewBackupStreamReader(r) var dataHdr *winio.BackupHeader for dataHdr == nil { bhdr, err := br.Next() if err == io.EOF { //nolint:errorlint break } if err != nil { return err } switch bhdr.Id { case winio.BackupData: hdr.Mode |= cISREG if !readTwice { dataHdr = bhdr } case winio.BackupSecurity: sd, err := io.ReadAll(br) if err != nil { return err } hdr.PAXRecords[hdrRawSecurityDescriptor] = base64.StdEncoding.EncodeToString(sd) case winio.BackupReparseData: hdr.Mode |= cISLNK hdr.Typeflag = tar.TypeSymlink reparseBuffer, _ := io.ReadAll(br) rp, err := winio.DecodeReparsePoint(reparseBuffer) if err != nil { return err } if rp.IsMountPoint { hdr.PAXRecords[hdrMountPoint] = "1" } hdr.Linkname = rp.Target case winio.BackupEaData: eab, err := io.ReadAll(br) if err != nil { return err } eas, err := winio.DecodeExtendedAttributes(eab) if err != nil { return err } for _, ea := range eas { // Use base64 encoding for the binary value. Note that there // is no way to encode the EA's flags, since their use doesn't // make any sense for persisted EAs. hdr.PAXRecords[hdrEaPrefix+ea.Name] = base64.StdEncoding.EncodeToString(ea.Value) } case winio.BackupAlternateData, winio.BackupLink, winio.BackupPropertyData, winio.BackupObjectId, winio.BackupTxfsData: // ignore these streams default: return fmt.Errorf("%s: unknown stream ID %d", name, bhdr.Id) } } err = t.WriteHeader(hdr) if err != nil { return err } if readTwice { // Get back to the data stream. if _, err = sr.Seek(restartPos, io.SeekStart); err != nil { return err } for dataHdr == nil { bhdr, err := br.Next() if err == io.EOF { //nolint:errorlint break } if err != nil { return err } if bhdr.Id == winio.BackupData { dataHdr = bhdr } } } // The logic for copying file contents is fairly complicated due to the need for handling sparse files, // and the weird ways they are represented by BackupRead. A normal file will always either have a data stream // with size and content, or no data stream at all (if empty). However, for a sparse file, the content can also // be represented using a series of sparse block streams following the data stream. Additionally, the way sparse // files are handled by BackupRead has changed in the OS recently. The specifics of the representation are described // in the list at the bottom of this block comment. // // Sparse files can be represented in four different ways, based on the specifics of the file. // - Size = 0: // Previously: BackupRead yields no data stream and no sparse block streams. // Recently: BackupRead yields a data stream with size = 0. There are no following sparse block streams. // - Size > 0, no allocated ranges: // BackupRead yields a data stream with size = 0. Following is a single sparse block stream with // size = 0 and offset = <file size>. // - Size > 0, one allocated range: // BackupRead yields a data stream with size = <file size> containing the file contents. There are no // sparse block streams. This is the case if you take a normal file with contents and simply set the // sparse flag on it. // - Size > 0, multiple allocated ranges: // BackupRead yields a data stream with size = 0. Following are sparse block streams for each allocated // range of the file containing the range contents. Finally there is a sparse block stream with // size = 0 and offset = <file size>. if dataHdr != nil { //nolint:nestif // todo: reduce nesting complexity // A data stream was found. Copy the data. // We assume that we will either have a data stream size > 0 XOR have sparse block streams. if dataHdr.Size > 0 || (dataHdr.Attributes&winio.StreamSparseAttributes) == 0 { if size != dataHdr.Size { return fmt.Errorf("%s: mismatch between file size %d and header size %d", name, size, dataHdr.Size) } if _, err = io.Copy(t, br); err != nil { return fmt.Errorf("%s: copying contents from data stream: %w", name, err) } } else if size > 0 { // As of a recent OS change, BackupRead now returns a data stream for empty sparse files. // These files have no sparse block streams, so skip the copySparse call if file size = 0. if err = copySparse(t, br); err != nil { return fmt.Errorf("%s: copying contents from sparse block stream: %w", name, err) } } } // Look for streams after the data stream. The only ones we handle are alternate data streams. // Other streams may have metadata that could be serialized, but the tar header has already // been written. In practice, this means that we don't get EA or TXF metadata. for { bhdr, err := br.Next() if err == io.EOF { //nolint:errorlint break } if err != nil { return err } switch bhdr.Id { case winio.BackupAlternateData: if (bhdr.Attributes & winio.StreamSparseAttributes) != 0 { // Unsupported for now, since the size of the alternate stream is not present // in the backup stream until after the data has been read. return fmt.Errorf("%s: tar of sparse alternate data streams is unsupported", name) } altName := strings.TrimSuffix(bhdr.Name, ":$DATA") hdr = &tar.Header{ Format: hdr.Format, Name: name + altName, Mode: hdr.Mode, Typeflag: tar.TypeReg, Size: bhdr.Size, ModTime: hdr.ModTime, AccessTime: hdr.AccessTime, ChangeTime: hdr.ChangeTime, } err = t.WriteHeader(hdr) if err != nil { return err } _, err = io.Copy(t, br) if err != nil { return err } case winio.BackupEaData, winio.BackupLink, winio.BackupPropertyData, winio.BackupObjectId, winio.BackupTxfsData: // ignore these streams default: return fmt.Errorf("%s: unknown stream ID %d after data", name, bhdr.Id) } } return nil }
0.811952
microsoft/go-winio
backuptar/tar.go
microsoft/go-winio
backuptar/tar.go
MIT
go
WriteBackupStreamFromTarFile writes a Win32 backup stream from the current tar file. Since this function may process multiple tar file entries in order to collect all the alternate data streams for the file, it returns the next tar file that was not processed, or io.EOF if there are no more.
func WriteBackupStreamFromTarFile(w io.Writer, t *tar.Reader, hdr *tar.Header) (*tar.Header, error) { bw := winio.NewBackupStreamWriter(w) sd, err := SecurityDescriptorFromTarHeader(hdr) if err != nil { return nil, err } if len(sd) != 0 { bhdr := winio.BackupHeader{ Id: winio.BackupSecurity, Size: int64(len(sd)), } err := bw.WriteHeader(&bhdr) if err != nil { return nil, err } _, err = bw.Write(sd) if err != nil { return nil, err } } eadata, err := ExtendedAttributesFromTarHeader(hdr) if err != nil { return nil, err } if len(eadata) != 0 { bhdr := winio.BackupHeader{ Id: winio.BackupEaData, Size: int64(len(eadata)), } err = bw.WriteHeader(&bhdr) if err != nil { return nil, err } _, err = bw.Write(eadata) if err != nil { return nil, err } } if hdr.Typeflag == tar.TypeSymlink { reparse := EncodeReparsePointFromTarHeader(hdr) bhdr := winio.BackupHeader{ Id: winio.BackupReparseData, Size: int64(len(reparse)), } err := bw.WriteHeader(&bhdr) if err != nil { return nil, err } _, err = bw.Write(reparse) if err != nil { return nil, err } } if hdr.Typeflag == tar.TypeReg { bhdr := winio.BackupHeader{ Id: winio.BackupData, Size: hdr.Size, } err := bw.WriteHeader(&bhdr) if err != nil { return nil, err } _, err = io.Copy(bw, t) if err != nil { return nil, err } } // Copy all the alternate data streams and return the next non-ADS header. for { ahdr, err := t.Next() if err != nil { return nil, err } if ahdr.Typeflag != tar.TypeReg || !strings.HasPrefix(ahdr.Name, hdr.Name+":") { return ahdr, nil } bhdr := winio.BackupHeader{ Id: winio.BackupAlternateData, Size: ahdr.Size, Name: ahdr.Name[len(hdr.Name):] + ":$DATA", } err = bw.WriteHeader(&bhdr) if err != nil { return nil, err } _, err = io.Copy(bw, t) if err != nil { return nil, err } } }
func tarToBackupStreamWithMutatedFiles(buf *bufio.Writer, w io.Writer, t *tar.Reader, hdr *tar.Header, root string) (nextHdr *tar.Header, err error) { var ( bcdBackup *os.File bcdBackupWriter *winio.BackupFileWriter ) if backupPath, ok := mutatedFiles[hdr.Name]; ok { bcdBackup, err = os.Create(filepath.Join(root, backupPath)) if err != nil { return nil, err } defer func() { cerr := bcdBackup.Close() if err == nil { err = cerr } }() bcdBackupWriter = winio.NewBackupFileWriter(bcdBackup, false) defer func() { cerr := bcdBackupWriter.Close() if err == nil { err = cerr } }() buf.Reset(io.MultiWriter(w, bcdBackupWriter)) } else { buf.Reset(w) } defer func() { ferr := buf.Flush() if err == nil { err = ferr } }() return writeBackupStreamFromTarFile(buf, t, hdr) }
0.792836
microsoft/go-winio
backuptar/tar.go
genuinetools/binctr
vendor/github.com/containerd/containerd/archive/tar_windows.go
MIT
go
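The reverse direction, sketched under the same assumptions: replaying a tar entry back onto a Windows file with WriteBackupStreamFromTarFile. The restoreOneFile helper and destination path are illustrative only; Windows-only like the sketch above.

package backuptarexample

import (
    "archive/tar"
    "os"

    "github.com/Microsoft/go-winio"
    "github.com/Microsoft/go-winio/backuptar"
)

// restoreOneFile writes one tar entry (plus its alternate data streams) to
// destPath via BackupWrite and returns the next unprocessed tar header.
// Hypothetical helper for illustration only.
func restoreOneFile(tr *tar.Reader, hdr *tar.Header, destPath string) (*tar.Header, error) {
    f, err := os.Create(destPath)
    if err != nil {
        return nil, err
    }
    defer f.Close()

    // BackupFileWriter applies the backup stream (data, EAs, security) to the file.
    bw := winio.NewBackupFileWriter(f, true /* apply security descriptor */)
    defer bw.Close()

    return backuptar.WriteBackupStreamFromTarFile(bw, tr, hdr)
}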
WriteBackupStreamFromTarFile writes a Win32 backup stream from the current tar file. Since this function may process multiple tar file entries in order to collect all the alternate data streams for the file, it returns the next tar file entry that was not processed, or io.EOF if there are no more.
func WriteBackupStreamFromTarFile(w io.Writer, t *tar.Reader, hdr *tar.Header) (*tar.Header, error) { bw := winio.NewBackupStreamWriter(w) sd, err := SecurityDescriptorFromTarHeader(hdr) if err != nil { return nil, err } if len(sd) != 0 { bhdr := winio.BackupHeader{ Id: winio.BackupSecurity, Size: int64(len(sd)), } err := bw.WriteHeader(&bhdr) if err != nil { return nil, err } _, err = bw.Write(sd) if err != nil { return nil, err } } eadata, err := ExtendedAttributesFromTarHeader(hdr) if err != nil { return nil, err } if len(eadata) != 0 { bhdr := winio.BackupHeader{ Id: winio.BackupEaData, Size: int64(len(eadata)), } err = bw.WriteHeader(&bhdr) if err != nil { return nil, err } _, err = bw.Write(eadata) if err != nil { return nil, err } } if hdr.Typeflag == tar.TypeSymlink { reparse := EncodeReparsePointFromTarHeader(hdr) bhdr := winio.BackupHeader{ Id: winio.BackupReparseData, Size: int64(len(reparse)), } err := bw.WriteHeader(&bhdr) if err != nil { return nil, err } _, err = bw.Write(reparse) if err != nil { return nil, err } } if hdr.Typeflag == tar.TypeReg { bhdr := winio.BackupHeader{ Id: winio.BackupData, Size: hdr.Size, } err := bw.WriteHeader(&bhdr) if err != nil { return nil, err } _, err = io.Copy(bw, t) if err != nil { return nil, err } } // Copy all the alternate data streams and return the next non-ADS header. for { ahdr, err := t.Next() if err != nil { return nil, err } if ahdr.Typeflag != tar.TypeReg || !strings.HasPrefix(ahdr.Name, hdr.Name+":") { return ahdr, nil } bhdr := winio.BackupHeader{ Id: winio.BackupAlternateData, Size: ahdr.Size, Name: ahdr.Name[len(hdr.Name):] + ":$DATA", } err = bw.WriteHeader(&bhdr) if err != nil { return nil, err } _, err = io.Copy(bw, t) if err != nil { return nil, err } } }
func writeBackupStreamFromTarAndSaveMutatedFiles(buf *bufio.Writer, w io.Writer, t *tar.Reader, hdr *tar.Header, root string) (nextHdr *tar.Header, err error) { var bcdBackup *os.File var bcdBackupWriter *winio.BackupFileWriter if backupPath, ok := mutatedFiles[hdr.Name]; ok { bcdBackup, err = os.Create(filepath.Join(root, backupPath)) if err != nil { return nil, err } defer func() { cerr := bcdBackup.Close() if err == nil { err = cerr } }() bcdBackupWriter = winio.NewBackupFileWriter(bcdBackup, false) defer func() { cerr := bcdBackupWriter.Close() if err == nil { err = cerr } }() buf.Reset(io.MultiWriter(w, bcdBackupWriter)) } else { buf.Reset(w) } defer func() { ferr := buf.Flush() if err == nil { err = ferr } }() return backuptar.WriteBackupStreamFromTarFile(buf, t, hdr) }
0.790002
microsoft/go-winio
backuptar/tar.go
containers/podman-tui
vendor/github.com/containers/storage/drivers/windows/windows.go
Apache-2.0
go
DisassociateApprovedOrigin API operation for Amazon Connect Service. This API is in preview release for Amazon Connect and is subject to change. Revokes access to integrated applications from Amazon Connect. Returns awserr.Error for service API and SDK errors. Use runtime type assertions with awserr.Error's Code and Message methods to get detailed information about the error. See the AWS API reference guide for Amazon Connect Service's API operation DisassociateApprovedOrigin for usage and error information. Returned Error Types: - ResourceNotFoundException The specified resource was not found. - InternalServiceException Request processing failed because of an error or failure with the service. - InvalidRequestException The request is not valid. - InvalidParameterException One or more of the specified parameters are not valid. - ThrottlingException The throttling limit has been exceeded. See also, https://docs.aws.amazon.com/goto/WebAPI/connect-2017-08-08/DisassociateApprovedOrigin
func (c *Connect) DisassociateApprovedOrigin(input *DisassociateApprovedOriginInput) (*DisassociateApprovedOriginOutput, error) { req, out := c.DisassociateApprovedOriginRequest(input) return out, req.Send() }
func (c *Connect) AssociateApprovedOrigin(input *AssociateApprovedOriginInput) (*AssociateApprovedOriginOutput, error) { req, out := c.AssociateApprovedOriginRequest(input) return out, req.Send() }
0.879012
aws/aws-sdk-go
service/connect/api.go
aws/aws-sdk-go
service/connect/api.go
Apache-2.0
go
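A hedged usage sketch for the DisassociateApprovedOrigin operation above; the instance ID and origin values are placeholders, and the input field names mirror the DELETE /instance/{InstanceId}/approved-origin route shown in the neighboring record.

package main

import (
    "log"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/session"
    "github.com/aws/aws-sdk-go/service/connect"
)

func main() {
    // Shared session built from the default credential/region chain.
    sess := session.Must(session.NewSession())
    svc := connect.New(sess)

    // Placeholder identifiers for illustration only.
    _, err := svc.DisassociateApprovedOrigin(&connect.DisassociateApprovedOriginInput{
        InstanceId: aws.String("instance-id"),
        Origin:     aws.String("https://example.com"),
    })
    if err != nil {
        log.Fatal(err)
    }
}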
DisassociateApprovedOrigin API operation for Amazon Connect Service. This API is in preview release for Amazon Connect and is subject to change. Revokes access to integrated applications from Amazon Connect. Returns awserr.Error for service API and SDK errors. Use runtime type assertions with awserr.Error's Code and Message methods to get detailed information about the error. See the AWS API reference guide for Amazon Connect Service's API operation DisassociateApprovedOrigin for usage and error information. Returned Error Types: - ResourceNotFoundException The specified resource was not found. - InternalServiceException Request processing failed because of an error or failure with the service. - InvalidRequestException The request is not valid. - InvalidParameterException One or more of the specified parameters are not valid. - ThrottlingException The throttling limit has been exceeded. See also, https://docs.aws.amazon.com/goto/WebAPI/connect-2017-08-08/DisassociateApprovedOrigin
func (c *Connect) DisassociateApprovedOrigin(input *DisassociateApprovedOriginInput) (*DisassociateApprovedOriginOutput, error) { req, out := c.DisassociateApprovedOriginRequest(input) return out, req.Send() }
func (c *Connect) DisassociateApprovedOriginRequest(input *DisassociateApprovedOriginInput) (req *request.Request, output *DisassociateApprovedOriginOutput) { op := &request.Operation{ Name: opDisassociateApprovedOrigin, HTTPMethod: "DELETE", HTTPPath: "/instance/{InstanceId}/approved-origin", } if input == nil { input = &DisassociateApprovedOriginInput{} } output = &DisassociateApprovedOriginOutput{} req = c.newRequest(op, input, output) req.Handlers.Unmarshal.Swap(restjson.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return }
0.785214
aws/aws-sdk-go
service/connect/api.go
aws/aws-sdk-go
service/connect/api.go
Apache-2.0
go
DisassociateApprovedOrigin API operation for Amazon Connect Service. This API is in preview release for Amazon Connect and is subject to change. Revokes access to integrated applications from Amazon Connect. Returns awserr.Error for service API and SDK errors. Use runtime type assertions with awserr.Error's Code and Message methods to get detailed information about the error. See the AWS API reference guide for Amazon Connect Service's API operation DisassociateApprovedOrigin for usage and error information. Returned Error Types: - ResourceNotFoundException The specified resource was not found. - InternalServiceException Request processing failed because of an error or failure with the service. - InvalidRequestException The request is not valid. - InvalidParameterException One or more of the specified parameters are not valid. - ThrottlingException The throttling limit has been exceeded. See also, https://docs.aws.amazon.com/goto/WebAPI/connect-2017-08-08/DisassociateApprovedOrigin
func (c *Connect) DisassociateApprovedOrigin(input *DisassociateApprovedOriginInput) (*DisassociateApprovedOriginOutput, error) { req, out := c.DisassociateApprovedOriginRequest(input) return out, req.Send() }
func (c *Connect) DisassociateApprovedOriginWithContext(ctx aws.Context, input *DisassociateApprovedOriginInput, opts ...request.Option) (*DisassociateApprovedOriginOutput, error) { req, out := c.DisassociateApprovedOriginRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() }
0.743062
aws/aws-sdk-go
service/connect/api.go
aws/aws-sdk-go
service/connect/api.go
Apache-2.0
go
DisassociateApprovedOrigin API operation for Amazon Connect Service. This API is in preview release for Amazon Connect and is subject to change. Revokes access to integrated applications from Amazon Connect. Returns awserr.Error for service API and SDK errors. Use runtime type assertions with awserr.Error's Code and Message methods to get detailed information about the error. See the AWS API reference guide for Amazon Connect Service's API operation DisassociateApprovedOrigin for usage and error information. Returned Error Types: - ResourceNotFoundException The specified resource was not found. - InternalServiceException Request processing failed because of an error or failure with the service. - InvalidRequestException The request is not valid. - InvalidParameterException One or more of the specified parameters are not valid. - ThrottlingException The throttling limit has been exceeded. See also, https://docs.aws.amazon.com/goto/WebAPI/connect-2017-08-08/DisassociateApprovedOrigin
func (c *Connect) DisassociateApprovedOrigin(input *DisassociateApprovedOriginInput) (*DisassociateApprovedOriginOutput, error) { req, out := c.DisassociateApprovedOriginRequest(input) return out, req.Send() }
func (c *Connect) ListApprovedOrigins(input *ListApprovedOriginsInput) (*ListApprovedOriginsOutput, error) { req, out := c.ListApprovedOriginsRequest(input) return out, req.Send() }
0.691459
aws/aws-sdk-go
service/connect/api.go
aws/aws-sdk-go
service/connect/api.go
Apache-2.0
go
DisassociateApprovedOrigin API operation for Amazon Connect Service. This API is in preview release for Amazon Connect and is subject to change. Revokes access to integrated applications from Amazon Connect. Returns awserr.Error for service API and SDK errors. Use runtime type assertions with awserr.Error's Code and Message methods to get detailed information about the error. See the AWS API reference guide for Amazon Connect Service's API operation DisassociateApprovedOrigin for usage and error information. Returned Error Types: - ResourceNotFoundException The specified resource was not found. - InternalServiceException Request processing failed because of an error or failure with the service. - InvalidRequestException The request is not valid. - InvalidParameterException One or more of the specified parameters are not valid. - ThrottlingException The throttling limit has been exceeded. See also, https://docs.aws.amazon.com/goto/WebAPI/connect-2017-08-08/DisassociateApprovedOrigin
func (c *Connect) DisassociateApprovedOrigin(input *DisassociateApprovedOriginInput) (*DisassociateApprovedOriginOutput, error) { req, out := c.DisassociateApprovedOriginRequest(input) return out, req.Send() }
func (c *Connect) AssociateApprovedOriginRequest(input *AssociateApprovedOriginInput) (req *request.Request, output *AssociateApprovedOriginOutput) { op := &request.Operation{ Name: opAssociateApprovedOrigin, HTTPMethod: "PUT", HTTPPath: "/instance/{InstanceId}/approved-origin", } if input == nil { input = &AssociateApprovedOriginInput{} } output = &AssociateApprovedOriginOutput{} req = c.newRequest(op, input, output) req.Handlers.Unmarshal.Swap(restjson.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return }
0.686188
aws/aws-sdk-go
service/connect/api.go
aws/aws-sdk-go
service/connect/api.go
Apache-2.0
go
FilterDisabledModule is a free log retrieval operation binding the contract event 0xaab4fa2b463f581b2b32cb3b7e3b704b9ce37cc209b5fb4d77e593ace4054276. Solidity: event DisabledModule(address indexed module)
func (_Safe *SafeFilterer) FilterDisabledModule(opts *bind.FilterOpts, module []common.Address) (*SafeDisabledModuleIterator, error) { var moduleRule []interface{} for _, moduleItem := range module { moduleRule = append(moduleRule, moduleItem) } logs, sub, err := _Safe.contract.FilterLogs(opts, "DisabledModule", moduleRule) if err != nil { return nil, err } return &SafeDisabledModuleIterator{contract: _Safe.contract, event: "DisabledModule", logs: logs, sub: sub}, nil }
func (_SafeV130 *SafeV130Filterer) FilterDisabledModule(opts *bind.FilterOpts) (*SafeV130DisabledModuleIterator, error) { logs, sub, err := _SafeV130.contract.FilterLogs(opts, "DisabledModule") if err != nil { return nil, err } return &SafeV130DisabledModuleIterator{contract: _SafeV130.contract, event: "DisabledModule", logs: logs, sub: sub}, nil }
0.988108
ethereum-optimism/optimism
op-e2e/bindings/safe.go
ethereum-optimism/optimism
op-e2e/bindings/safe_v130.go
MIT
go
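A hedged sketch of consuming the generated Safe filterer above. The RPC endpoint, contract address, and the bindings import path are assumptions; NewSafe is the standard abigen constructor expected to live alongside the filterer in the generated package.

package main

import (
    "fmt"
    "log"

    "github.com/ethereum-optimism/optimism/op-e2e/bindings" // assumed import path of the generated package
    "github.com/ethereum/go-ethereum/accounts/abi/bind"
    "github.com/ethereum/go-ethereum/common"
    "github.com/ethereum/go-ethereum/ethclient"
)

func main() {
    client, err := ethclient.Dial("https://example-rpc.invalid") // placeholder endpoint
    if err != nil {
        log.Fatal(err)
    }
    safe, err := bindings.NewSafe(common.HexToAddress("0x0000000000000000000000000000000000000000"), client)
    if err != nil {
        log.Fatal(err)
    }

    // Replay historical DisabledModule events; nil means "any module address".
    it, err := safe.FilterDisabledModule(&bind.FilterOpts{Start: 0}, nil)
    if err != nil {
        log.Fatal(err)
    }
    defer it.Close()
    for it.Next() {
        fmt.Println("module disabled:", it.Event.Module)
    }
    if err := it.Error(); err != nil {
        log.Fatal(err)
    }
}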
FilterDisabledModule is a free log retrieval operation binding the contract event 0xaab4fa2b463f581b2b32cb3b7e3b704b9ce37cc209b5fb4d77e593ace4054276. Solidity: event DisabledModule(address indexed module)
func (_Safe *SafeFilterer) FilterDisabledModule(opts *bind.FilterOpts, module []common.Address) (*SafeDisabledModuleIterator, error) { var moduleRule []interface{} for _, moduleItem := range module { moduleRule = append(moduleRule, moduleItem) } logs, sub, err := _Safe.contract.FilterLogs(opts, "DisabledModule", moduleRule) if err != nil { return nil, err } return &SafeDisabledModuleIterator{contract: _Safe.contract, event: "DisabledModule", logs: logs, sub: sub}, nil }
func (_Safe *SafeFilterer) FilterEnabledModule(opts *bind.FilterOpts, module []common.Address) (*SafeEnabledModuleIterator, error) { var moduleRule []interface{} for _, moduleItem := range module { moduleRule = append(moduleRule, moduleItem) } logs, sub, err := _Safe.contract.FilterLogs(opts, "EnabledModule", moduleRule) if err != nil { return nil, err } return &SafeEnabledModuleIterator{contract: _Safe.contract, event: "EnabledModule", logs: logs, sub: sub}, nil }
0.913092
ethereum-optimism/optimism
op-e2e/bindings/safe.go
ethereum-optimism/optimism
op-e2e/bindings/safe.go
MIT
go
FilterDisabledModule is a free log retrieval operation binding the contract event 0xaab4fa2b463f581b2b32cb3b7e3b704b9ce37cc209b5fb4d77e593ace4054276. Solidity: event DisabledModule(address indexed module)
func (_Safe *SafeFilterer) FilterDisabledModule(opts *bind.FilterOpts, module []common.Address) (*SafeDisabledModuleIterator, error) { var moduleRule []interface{} for _, moduleItem := range module { moduleRule = append(moduleRule, moduleItem) } logs, sub, err := _Safe.contract.FilterLogs(opts, "DisabledModule", moduleRule) if err != nil { return nil, err } return &SafeDisabledModuleIterator{contract: _Safe.contract, event: "DisabledModule", logs: logs, sub: sub}, nil }
func (_SafeV130 *SafeV130Filterer) FilterEnabledModule(opts *bind.FilterOpts) (*SafeV130EnabledModuleIterator, error) { logs, sub, err := _SafeV130.contract.FilterLogs(opts, "EnabledModule") if err != nil { return nil, err } return &SafeV130EnabledModuleIterator{contract: _SafeV130.contract, event: "EnabledModule", logs: logs, sub: sub}, nil }
0.90444
ethereum-optimism/optimism
op-e2e/bindings/safe.go
ethereum-optimism/optimism
op-e2e/bindings/safe_v130.go
MIT
go
FilterDisabledModule is a free log retrieval operation binding the contract event 0xaab4fa2b463f581b2b32cb3b7e3b704b9ce37cc209b5fb4d77e593ace4054276. Solidity: event DisabledModule(address indexed module)
func (_Safe *SafeFilterer) FilterDisabledModule(opts *bind.FilterOpts, module []common.Address) (*SafeDisabledModuleIterator, error) { var moduleRule []interface{} for _, moduleItem := range module { moduleRule = append(moduleRule, moduleItem) } logs, sub, err := _Safe.contract.FilterLogs(opts, "DisabledModule", moduleRule) if err != nil { return nil, err } return &SafeDisabledModuleIterator{contract: _Safe.contract, event: "DisabledModule", logs: logs, sub: sub}, nil }
func (_SafeV130 *SafeV130Filterer) WatchDisabledModule(opts *bind.WatchOpts, sink chan<- *SafeV130DisabledModule) (event.Subscription, error) { logs, sub, err := _SafeV130.contract.WatchLogs(opts, "DisabledModule") if err != nil { return nil, err } return event.NewSubscription(func(quit <-chan struct{}) error { defer sub.Unsubscribe() for { select { case log := <-logs: // New log arrived, parse the event and forward to the user event := new(SafeV130DisabledModule) if err := _SafeV130.contract.UnpackLog(event, "DisabledModule", log); err != nil { return err } event.Raw = log select { case sink <- event: case err := <-sub.Err(): return err case <-quit: return nil } case err := <-sub.Err(): return err case <-quit: return nil } } }), nil }
0.790028
ethereum-optimism/optimism
op-e2e/bindings/safe.go
ethereum-optimism/optimism
op-e2e/bindings/safe_v130.go
MIT
go
FilterDisabledModule is a free log retrieval operation binding the contract event 0xaab4fa2b463f581b2b32cb3b7e3b704b9ce37cc209b5fb4d77e593ace4054276. Solidity: event DisabledModule(address indexed module)
func (_Safe *SafeFilterer) FilterDisabledModule(opts *bind.FilterOpts, module []common.Address) (*SafeDisabledModuleIterator, error) { var moduleRule []interface{} for _, moduleItem := range module { moduleRule = append(moduleRule, moduleItem) } logs, sub, err := _Safe.contract.FilterLogs(opts, "DisabledModule", moduleRule) if err != nil { return nil, err } return &SafeDisabledModuleIterator{contract: _Safe.contract, event: "DisabledModule", logs: logs, sub: sub}, nil }
func (_Safe *SafeFilterer) WatchDisabledModule(opts *bind.WatchOpts, sink chan<- *SafeDisabledModule, module []common.Address) (event.Subscription, error) { var moduleRule []interface{} for _, moduleItem := range module { moduleRule = append(moduleRule, moduleItem) } logs, sub, err := _Safe.contract.WatchLogs(opts, "DisabledModule", moduleRule) if err != nil { return nil, err } return event.NewSubscription(func(quit <-chan struct{}) error { defer sub.Unsubscribe() for { select { case log := <-logs: // New log arrived, parse the event and forward to the user event := new(SafeDisabledModule) if err := _Safe.contract.UnpackLog(event, "DisabledModule", log); err != nil { return err } event.Raw = log select { case sink <- event: case err := <-sub.Err(): return err case <-quit: return nil } case err := <-sub.Err(): return err case <-quit: return nil } } }), nil }
0.783744
ethereum-optimism/optimism
op-e2e/bindings/safe.go
ethereum-optimism/optimism
op-e2e/bindings/safe.go
MIT
go
newInformer returns a controller for populating the store while also providing event notifications. Parameters * lw is list and watch functions for the source of the resource you want to be informed of. * objType is an object of the type that you expect to receive. * resyncPeriod: if non-zero, will re-list this often (you will get OnUpdate calls, even if nothing changed). Otherwise, re-list will be delayed as long as possible (until the upstream source closes the watch or times out, or you stop the controller). * h is the object you want notifications sent to. * clientState is the store you want to populate
func newInformer( lw ListerWatcher, objType runtime.Object, resyncPeriod time.Duration, h ResourceEventHandler, clientState Store, ) Controller { // This will hold incoming changes. Note how we pass clientState in as a // KeyLister, that way resync operations will result in the correct set // of update/delete deltas. fifo := NewDeltaFIFOWithOptions(DeltaFIFOOptions{ KnownObjects: clientState, EmitDeltaTypeReplaced: true, }) cfg := &Config{ Queue: fifo, ListerWatcher: lw, ObjectType: objType, FullResyncPeriod: resyncPeriod, RetryOnError: false, Process: func(obj interface{}) error { // from oldest to newest for _, d := range obj.(Deltas) { switch d.Type { case Sync, Replaced, Added, Updated: if old, exists, err := clientState.Get(d.Object); err == nil && exists { if err := clientState.Update(d.Object); err != nil { return err } h.OnUpdate(old, d.Object) } else { if err := clientState.Add(d.Object); err != nil { return err } h.OnAdd(d.Object) } case Deleted: if err := clientState.Delete(d.Object); err != nil { return err } h.OnDelete(d.Object) } } return nil }, } return New(cfg) }
func newInformer( lw ListerWatcher, objType runtime.Object, resyncPeriod time.Duration, h ResourceEventHandler, clientState Store, transformer TransformFunc, ) Controller { // This will hold incoming changes. Note how we pass clientState in as a // KeyLister, that way resync operations will result in the correct set // of update/delete deltas. fifo := NewDeltaFIFOWithOptions(DeltaFIFOOptions{ KnownObjects: clientState, EmitDeltaTypeReplaced: true, Transformer: transformer, }) cfg := &Config{ Queue: fifo, ListerWatcher: lw, ObjectType: objType, FullResyncPeriod: resyncPeriod, RetryOnError: false, Process: func(obj interface{}, isInInitialList bool) error { if deltas, ok := obj.(Deltas); ok { return processDeltas(h, clientState, deltas, isInInitialList) } return errors.New("object given as Process argument is not Deltas") }, } return New(cfg) }
0.985065
armory/spinnaker-operator
vendor/k8s.io/client-go/tools/cache/controller.go
k8snetworkplumbingwg/multus-cni
vendor/k8s.io/client-go/tools/cache/controller.go
Apache-2.0
go
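A hedged wiring sketch for the NewInformer/newInformer pair above, watching Pods. The in-cluster config, resync period, and handler bodies are illustrative assumptions.

package main

import (
    "fmt"
    "time"

    corev1 "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/fields"
    "k8s.io/client-go/kubernetes"
    "k8s.io/client-go/rest"
    "k8s.io/client-go/tools/cache"
)

func main() {
    cfg, err := rest.InClusterConfig() // assumes in-cluster credentials
    if err != nil {
        panic(err)
    }
    clientset := kubernetes.NewForConfigOrDie(cfg)

    // ListerWatcher for all Pods; the informer keeps the returned store in sync.
    lw := cache.NewListWatchFromClient(clientset.CoreV1().RESTClient(), "pods", metav1.NamespaceAll, fields.Everything())
    store, controller := cache.NewInformer(lw, &corev1.Pod{}, 30*time.Second, cache.ResourceEventHandlerFuncs{
        AddFunc:    func(obj interface{}) { fmt.Println("added:", obj.(*corev1.Pod).Name) },
        DeleteFunc: func(obj interface{}) { fmt.Println("deleted a pod") },
    })
    _ = store // populated by the controller as events arrive

    stop := make(chan struct{})
    defer close(stop)
    controller.Run(stop) // blocks until stop is closed
}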
newInformer returns a controller for populating the store while also providing event notifications. Parameters * lw is list and watch functions for the source of the resource you want to be informed of. * objType is an object of the type that you expect to receive. * resyncPeriod: if non-zero, will re-list this often (you will get OnUpdate calls, even if nothing changed). Otherwise, re-list will be delayed as long as possible (until the upstream source closes the watch or times out, or you stop the controller). * h is the object you want notifications sent to. * clientState is the store you want to populate
func newInformer( lw ListerWatcher, objType runtime.Object, resyncPeriod time.Duration, h ResourceEventHandler, clientState Store, ) Controller { // This will hold incoming changes. Note how we pass clientState in as a // KeyLister, that way resync operations will result in the correct set // of update/delete deltas. fifo := NewDeltaFIFOWithOptions(DeltaFIFOOptions{ KnownObjects: clientState, EmitDeltaTypeReplaced: true, }) cfg := &Config{ Queue: fifo, ListerWatcher: lw, ObjectType: objType, FullResyncPeriod: resyncPeriod, RetryOnError: false, Process: func(obj interface{}) error { // from oldest to newest for _, d := range obj.(Deltas) { switch d.Type { case Sync, Replaced, Added, Updated: if old, exists, err := clientState.Get(d.Object); err == nil && exists { if err := clientState.Update(d.Object); err != nil { return err } h.OnUpdate(old, d.Object) } else { if err := clientState.Add(d.Object); err != nil { return err } h.OnAdd(d.Object) } case Deleted: if err := clientState.Delete(d.Object); err != nil { return err } h.OnDelete(d.Object) } } return nil }, } return New(cfg) }
func NewInformer( lw ListerWatcher, objType runtime.Object, resyncPeriod time.Duration, h ResourceEventHandler, ) (Store, Controller) { // This will hold the client state, as we know it. clientState := NewStore(DeletionHandlingMetaNamespaceKeyFunc) return clientState, newInformer(lw, objType, resyncPeriod, h, clientState) }
0.931936
armory/spinnaker-operator
vendor/k8s.io/client-go/tools/cache/controller.go
armory/spinnaker-operator
vendor/k8s.io/client-go/tools/cache/controller.go
Apache-2.0
go
newInformer returns a controller for populating the store while also providing event notifications. Parameters * lw is list and watch functions for the source of the resource you want to be informed of. * objType is an object of the type that you expect to receive. * resyncPeriod: if non-zero, will re-list this often (you will get OnUpdate calls, even if nothing changed). Otherwise, re-list will be delayed as long as possible (until the upstream source closes the watch or times out, or you stop the controller). * h is the object you want notifications sent to. * clientState is the store you want to populate
func newInformer( lw ListerWatcher, objType runtime.Object, resyncPeriod time.Duration, h ResourceEventHandler, clientState Store, ) Controller { // This will hold incoming changes. Note how we pass clientState in as a // KeyLister, that way resync operations will result in the correct set // of update/delete deltas. fifo := NewDeltaFIFOWithOptions(DeltaFIFOOptions{ KnownObjects: clientState, EmitDeltaTypeReplaced: true, }) cfg := &Config{ Queue: fifo, ListerWatcher: lw, ObjectType: objType, FullResyncPeriod: resyncPeriod, RetryOnError: false, Process: func(obj interface{}) error { // from oldest to newest for _, d := range obj.(Deltas) { switch d.Type { case Sync, Replaced, Added, Updated: if old, exists, err := clientState.Get(d.Object); err == nil && exists { if err := clientState.Update(d.Object); err != nil { return err } h.OnUpdate(old, d.Object) } else { if err := clientState.Add(d.Object); err != nil { return err } h.OnAdd(d.Object) } case Deleted: if err := clientState.Delete(d.Object); err != nil { return err } h.OnDelete(d.Object) } } return nil }, } return New(cfg) }
func NewInformer( lw ListerWatcher, objType runtime.Object, resyncPeriod time.Duration, h ResourceEventHandler, ) (Store, Controller) { // This will hold the client state, as we know it. clientState := NewStore(DeletionHandlingMetaNamespaceKeyFunc) return clientState, newInformer(lw, objType, resyncPeriod, h, clientState, nil) }
0.919282
armory/spinnaker-operator
vendor/k8s.io/client-go/tools/cache/controller.go
k8snetworkplumbingwg/multus-cni
vendor/k8s.io/client-go/tools/cache/controller.go
Apache-2.0
go
newInformer returns a controller for populating the store while also providing event notifications. Parameters * lw is list and watch functions for the source of the resource you want to be informed of. * objType is an object of the type that you expect to receive. * resyncPeriod: if non-zero, will re-list this often (you will get OnUpdate calls, even if nothing changed). Otherwise, re-list will be delayed as long as possible (until the upstream source closes the watch or times out, or you stop the controller). * h is the object you want notifications sent to. * clientState is the store you want to populate
func newInformer( lw ListerWatcher, objType runtime.Object, resyncPeriod time.Duration, h ResourceEventHandler, clientState Store, ) Controller { // This will hold incoming changes. Note how we pass clientState in as a // KeyLister, that way resync operations will result in the correct set // of update/delete deltas. fifo := NewDeltaFIFOWithOptions(DeltaFIFOOptions{ KnownObjects: clientState, EmitDeltaTypeReplaced: true, }) cfg := &Config{ Queue: fifo, ListerWatcher: lw, ObjectType: objType, FullResyncPeriod: resyncPeriod, RetryOnError: false, Process: func(obj interface{}) error { // from oldest to newest for _, d := range obj.(Deltas) { switch d.Type { case Sync, Replaced, Added, Updated: if old, exists, err := clientState.Get(d.Object); err == nil && exists { if err := clientState.Update(d.Object); err != nil { return err } h.OnUpdate(old, d.Object) } else { if err := clientState.Add(d.Object); err != nil { return err } h.OnAdd(d.Object) } case Deleted: if err := clientState.Delete(d.Object); err != nil { return err } h.OnDelete(d.Object) } } return nil }, } return New(cfg) }
func NewInformer( lw ListerWatcher, objType runtime.Object, resyncPeriod time.Duration, h ResourceEventHandler, ) (Store, Controller) { // This will hold the client state, as we know it. clientState := NewStore(DeletionHandlingMetaNamespaceKeyFunc) options := InformerOptions{ ListerWatcher: lw, ObjectType: objType, Handler: h, ResyncPeriod: resyncPeriod, } return clientState, newInformer(clientState, options) }
0.900628
armory/spinnaker-operator
vendor/k8s.io/client-go/tools/cache/controller.go
tektoncd/cli
vendor/k8s.io/client-go/tools/cache/controller.go
Apache-2.0
go
newInformer returns a controller for populating the store while also providing event notifications. Parameters * lw is list and watch functions for the source of the resource you want to be informed of. * objType is an object of the type that you expect to receive. * resyncPeriod: if non-zero, will re-list this often (you will get OnUpdate calls, even if nothing changed). Otherwise, re-list will be delayed as long as possible (until the upstream source closes the watch or times out, or you stop the controller). * h is the object you want notifications sent to. * clientState is the store you want to populate
func newInformer( lw ListerWatcher, objType runtime.Object, resyncPeriod time.Duration, h ResourceEventHandler, clientState Store, ) Controller { // This will hold incoming changes. Note how we pass clientState in as a // KeyLister, that way resync operations will result in the correct set // of update/delete deltas. fifo := NewDeltaFIFOWithOptions(DeltaFIFOOptions{ KnownObjects: clientState, EmitDeltaTypeReplaced: true, }) cfg := &Config{ Queue: fifo, ListerWatcher: lw, ObjectType: objType, FullResyncPeriod: resyncPeriod, RetryOnError: false, Process: func(obj interface{}) error { // from oldest to newest for _, d := range obj.(Deltas) { switch d.Type { case Sync, Replaced, Added, Updated: if old, exists, err := clientState.Get(d.Object); err == nil && exists { if err := clientState.Update(d.Object); err != nil { return err } h.OnUpdate(old, d.Object) } else { if err := clientState.Add(d.Object); err != nil { return err } h.OnAdd(d.Object) } case Deleted: if err := clientState.Delete(d.Object); err != nil { return err } h.OnDelete(d.Object) } } return nil }, } return New(cfg) }
func NewIndexerInformer( lw ListerWatcher, objType runtime.Object, resyncPeriod time.Duration, h ResourceEventHandler, indexers Indexers, ) (Indexer, Controller) { // This will hold the client state, as we know it. clientState := NewIndexer(DeletionHandlingMetaNamespaceKeyFunc, indexers) return clientState, newInformer(lw, objType, resyncPeriod, h, clientState, nil) }
0.771597
armory/spinnaker-operator
vendor/k8s.io/client-go/tools/cache/controller.go
k8snetworkplumbingwg/multus-cni
vendor/k8s.io/client-go/tools/cache/controller.go
Apache-2.0
go
NewInt64SumObserver creates a new integer SumObserver instrument with the given name, running in a batch callback, and customized with options. May return an error if the name is invalid (e.g., empty) or improperly registered (e.g., duplicate registration).
func (b BatchObserver) NewInt64SumObserver(name string, opts ...InstrumentOption) (Int64SumObserver, error) { if b.runner == nil { return wrapInt64SumObserverInstrument(NoopAsync{}, nil) } return wrapInt64SumObserverInstrument( b.meter.newAsync(name, SumObserverInstrumentKind, number.Int64Kind, opts, b.runner)) }
func (m Meter) NewInt64SumObserver(name string, callback Int64ObserverFunc, opts ...InstrumentOption) (Int64SumObserver, error) { if callback == nil { return wrapInt64SumObserverInstrument(NoopAsync{}, nil) } return wrapInt64SumObserverInstrument( m.newAsync(name, SumObserverInstrumentKind, number.Int64Kind, opts, newInt64AsyncRunner(callback))) }
0.968162
config-syncer/config-syncer
vendor/go.opentelemetry.io/otel/metric/metric.go
config-syncer/config-syncer
vendor/go.opentelemetry.io/otel/metric/metric.go
Apache-2.0
go
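A hedged sketch of registering the observer above on an existing metric.Meter. The Int64ObserverFunc callback shape and the result.Observe call are assumptions based on the pre-1.0 OpenTelemetry metric API that this vendored package comes from; the instrument name and observed value are placeholders.

package observability

import (
    "context"

    "go.opentelemetry.io/otel/metric"
)

// registerQueueDepth is a hypothetical helper that wires an asynchronous
// Int64SumObserver to a caller-supplied Meter.
func registerQueueDepth(meter metric.Meter) error {
    _, err := meter.NewInt64SumObserver(
        "queue.depth.total",
        func(ctx context.Context, result metric.Int64ObserverResult) {
            // In a real program the value would come from application state.
            result.Observe(42)
        },
    )
    return err
}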
NewInt64SumObserver creates a new integer SumObserver instrument with the given name, running in a batch callback, and customized with options. May return an error if the name is invalid (e.g., empty) or improperly registered (e.g., duplicate registration).
func (b BatchObserver) NewInt64SumObserver(name string, opts ...InstrumentOption) (Int64SumObserver, error) { if b.runner == nil { return wrapInt64SumObserverInstrument(NoopAsync{}, nil) } return wrapInt64SumObserverInstrument( b.meter.newAsync(name, SumObserverInstrumentKind, number.Int64Kind, opts, b.runner)) }
func (b BatchObserver) NewInt64UpDownSumObserver(name string, opts ...InstrumentOption) (Int64UpDownSumObserver, error) { if b.runner == nil { return wrapInt64UpDownSumObserverInstrument(NoopAsync{}, nil) } return wrapInt64UpDownSumObserverInstrument( b.meter.newAsync(name, UpDownSumObserverInstrumentKind, number.Int64Kind, opts, b.runner)) }
0.926465
config-syncer/config-syncer
vendor/go.opentelemetry.io/otel/metric/metric.go
config-syncer/config-syncer
vendor/go.opentelemetry.io/otel/metric/metric.go
Apache-2.0
go
NewInt64SumObserver creates a new integer SumObserver instrument with the given name, running in a batch callback, and customized with options. May return an error if the name is invalid (e.g., empty) or improperly registered (e.g., duplicate registration).
func (b BatchObserver) NewInt64SumObserver(name string, opts ...InstrumentOption) (Int64SumObserver, error) { if b.runner == nil { return wrapInt64SumObserverInstrument(NoopAsync{}, nil) } return wrapInt64SumObserverInstrument( b.meter.newAsync(name, SumObserverInstrumentKind, number.Int64Kind, opts, b.runner)) }
func (b BatchObserver) NewInt64ValueObserver(name string, opts ...InstrumentOption) (Int64ValueObserver, error) { if b.runner == nil { return wrapInt64ValueObserverInstrument(NoopAsync{}, nil) } return wrapInt64ValueObserverInstrument( b.meter.newAsync(name, ValueObserverInstrumentKind, number.Int64Kind, opts, b.runner)) }
0.906366
config-syncer/config-syncer
vendor/go.opentelemetry.io/otel/metric/metric.go
config-syncer/config-syncer
vendor/go.opentelemetry.io/otel/metric/metric.go
Apache-2.0
go
NewInt64SumObserver creates a new integer SumObserver instrument with the given name, running in a batch callback, and customized with options. May return an error if the name is invalid (e.g., empty) or improperly registered (e.g., duplicate registration).
func (b BatchObserver) NewInt64SumObserver(name string, opts ...InstrumentOption) (Int64SumObserver, error) { if b.runner == nil { return wrapInt64SumObserverInstrument(NoopAsync{}, nil) } return wrapInt64SumObserverInstrument( b.meter.newAsync(name, SumObserverInstrumentKind, number.Int64Kind, opts, b.runner)) }
func (m Meter) NewInt64UpDownSumObserver(name string, callback Int64ObserverFunc, opts ...InstrumentOption) (Int64UpDownSumObserver, error) { if callback == nil { return wrapInt64UpDownSumObserverInstrument(NoopAsync{}, nil) } return wrapInt64UpDownSumObserverInstrument( m.newAsync(name, UpDownSumObserverInstrumentKind, number.Int64Kind, opts, newInt64AsyncRunner(callback))) }
0.899628
config-syncer/config-syncer
vendor/go.opentelemetry.io/otel/metric/metric.go
config-syncer/config-syncer
vendor/go.opentelemetry.io/otel/metric/metric.go
Apache-2.0
go
NewInt64SumObserver creates a new integer SumObserver instrument with the given name, running in a batch callback, and customized with options. May return an error if the name is invalid (e.g., empty) or improperly registered (e.g., duplicate registration).
func (b BatchObserver) NewInt64SumObserver(name string, opts ...InstrumentOption) (Int64SumObserver, error) { if b.runner == nil { return wrapInt64SumObserverInstrument(NoopAsync{}, nil) } return wrapInt64SumObserverInstrument( b.meter.newAsync(name, SumObserverInstrumentKind, number.Int64Kind, opts, b.runner)) }
func (b BatchObserver) NewFloat64SumObserver(name string, opts ...InstrumentOption) (Float64SumObserver, error) { if b.runner == nil { return wrapFloat64SumObserverInstrument(NoopAsync{}, nil) } return wrapFloat64SumObserverInstrument( b.meter.newAsync(name, SumObserverInstrumentKind, number.Float64Kind, opts, b.runner)) }
0.898173
config-syncer/config-syncer
vendor/go.opentelemetry.io/otel/metric/metric.go
config-syncer/config-syncer
vendor/go.opentelemetry.io/otel/metric/metric.go
Apache-2.0
go
NewForConfig creates a new DiscoveryV1Client for the given config. NewForConfig is equivalent to NewForConfigAndClient(c, httpClient), where httpClient was generated with rest.HTTPClientFor(c).
func NewForConfig(c *rest.Config) (*DiscoveryV1Client, error) { config := *c if err := setConfigDefaults(&config); err != nil { return nil, err } httpClient, err := rest.HTTPClientFor(&config) if err != nil { return nil, err } return NewForConfigAndClient(&config, httpClient) }
func NewForConfig(c *rest.Config) (*DiscoveryV1beta1Client, error) { config := *c if err := setConfigDefaults(&config); err != nil { return nil, err } httpClient, err := rest.HTTPClientFor(&config) if err != nil { return nil, err } return NewForConfigAndClient(&config, httpClient) }
0.98901
loggie-io/loggie
vendor/k8s.io/client-go/kubernetes/typed/discovery/v1/discovery_client.go
loggie-io/loggie
vendor/k8s.io/client-go/kubernetes/typed/discovery/v1beta1/discovery_client.go
Apache-2.0
go
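A hedged end-to-end sketch for the DiscoveryV1Client constructor above, listing EndpointSlices; the kubeconfig location and namespace are illustrative assumptions.

package main

import (
    "context"
    "fmt"

    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    discoveryv1 "k8s.io/client-go/kubernetes/typed/discovery/v1"
    "k8s.io/client-go/tools/clientcmd"
)

func main() {
    // Load the default kubeconfig from the home directory (assumption).
    cfg, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
    if err != nil {
        panic(err)
    }
    client, err := discoveryv1.NewForConfig(cfg)
    if err != nil {
        panic(err)
    }
    slices, err := client.EndpointSlices("default").List(context.Background(), metav1.ListOptions{})
    if err != nil {
        panic(err)
    }
    fmt.Println("endpoint slices:", len(slices.Items))
}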
NewForConfig creates a new DiscoveryV1Client for the given config. NewForConfig is equivalent to NewForConfigAndClient(c, httpClient), where httpClient was generated with rest.HTTPClientFor(c).
func NewForConfig(c *rest.Config) (*DiscoveryV1Client, error) { config := *c if err := setConfigDefaults(&config); err != nil { return nil, err } httpClient, err := rest.HTTPClientFor(&config) if err != nil { return nil, err } return NewForConfigAndClient(&config, httpClient) }
func NewForConfig(c *rest.Config) (*ConfigurationV1Client, error) { config := *c if err := setConfigDefaults(&config); err != nil { return nil, err } httpClient, err := rest.HTTPClientFor(&config) if err != nil { return nil, err } return NewForConfigAndClient(&config, httpClient) }
0.979192
loggie-io/loggie
vendor/k8s.io/client-go/kubernetes/typed/discovery/v1/discovery_client.go
Kong/kubernetes-ingress-controller
pkg/clientset/typed/configuration/v1/configuration_client.go
Apache-2.0
go
NewForConfig creates a new DiscoveryV1Client for the given config. NewForConfig is equivalent to NewForConfigAndClient(c, httpClient), where httpClient was generated with rest.HTTPClientFor(c).
func NewForConfig(c *rest.Config) (*DiscoveryV1Client, error) { config := *c if err := setConfigDefaults(&config); err != nil { return nil, err } httpClient, err := rest.HTTPClientFor(&config) if err != nil { return nil, err } return NewForConfigAndClient(&config, httpClient) }
func NewForConfig(c *rest.Config) (*CoreV1Client, error) { config := *c if err := setConfigDefaults(&config); err != nil { return nil, err } httpClient, err := rest.HTTPClientFor(&config) if err != nil { return nil, err } return NewForConfigAndClient(&config, httpClient) }
0.97315
loggie-io/loggie
vendor/k8s.io/client-go/kubernetes/typed/discovery/v1/discovery_client.go
loggie-io/loggie
vendor/k8s.io/client-go/kubernetes/typed/core/v1/core_client.go
Apache-2.0
go
NewForConfig creates a new DiscoveryV1Client for the given config. NewForConfig is equivalent to NewForConfigAndClient(c, httpClient), where httpClient was generated with rest.HTTPClientFor(c).
func NewForConfig(c *rest.Config) (*DiscoveryV1Client, error) { config := *c if err := setConfigDefaults(&config); err != nil { return nil, err } httpClient, err := rest.HTTPClientFor(&config) if err != nil { return nil, err } return NewForConfigAndClient(&config, httpClient) }
func NewForConfig(c *rest.Config) (*FlowcontrolV1Client, error) { config := *c if err := setConfigDefaults(&config); err != nil { return nil, err } httpClient, err := rest.HTTPClientFor(&config) if err != nil { return nil, err } return NewForConfigAndClient(&config, httpClient) }
0.9718
loggie-io/loggie
vendor/k8s.io/client-go/kubernetes/typed/discovery/v1/discovery_client.go
tektoncd/cli
vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1/flowcontrol_client.go
Apache-2.0
go
NewForConfig creates a new DiscoveryV1Client for the given config. NewForConfig is equivalent to NewForConfigAndClient(c, httpClient), where httpClient was generated with rest.HTTPClientFor(c).
func NewForConfig(c *rest.Config) (*DiscoveryV1Client, error) { config := *c if err := setConfigDefaults(&config); err != nil { return nil, err } httpClient, err := rest.HTTPClientFor(&config) if err != nil { return nil, err } return NewForConfigAndClient(&config, httpClient) }
func NewForConfig(c *rest.Config) (*ConfigurationV1alpha1Client, error) { config := *c if err := setConfigDefaults(&config); err != nil { return nil, err } httpClient, err := rest.HTTPClientFor(&config) if err != nil { return nil, err } return NewForConfigAndClient(&config, httpClient) }
0.971004
loggie-io/loggie
vendor/k8s.io/client-go/kubernetes/typed/discovery/v1/discovery_client.go
Kong/kubernetes-ingress-controller
pkg/clientset/typed/configuration/v1alpha1/configuration_client.go
Apache-2.0
go
LookupEntry get an entry by target resource name. This method allows clients to use the resource name from the source Google Cloud Platform service to get the Data Catalog Entry. Deprecated: LookupEntry may be removed in a future version.
func (c *Client) LookupEntry(ctx context.Context, req *datacatalogpb.LookupEntryRequest, opts ...gax.CallOption) (*datacatalogpb.Entry, error) { return c.internalClient.LookupEntry(ctx, req, opts...) }
func (c *restClient) LookupEntry(ctx context.Context, req *datacatalogpb.LookupEntryRequest, opts ...gax.CallOption) (*datacatalogpb.Entry, error) { baseUrl, err := url.Parse(c.endpoint) if err != nil { return nil, err } baseUrl.Path += fmt.Sprintf("/v1/entries:lookup") params := url.Values{} if req.GetFullyQualifiedName() != "" { params.Add("fullyQualifiedName", fmt.Sprintf("%v", req.GetFullyQualifiedName())) } if req.GetLinkedResource() != "" { params.Add("linkedResource", fmt.Sprintf("%v", req.GetLinkedResource())) } if req.GetLocation() != "" { params.Add("location", fmt.Sprintf("%v", req.GetLocation())) } if req.GetProject() != "" { params.Add("project", fmt.Sprintf("%v", req.GetProject())) } if req.GetSqlResource() != "" { params.Add("sqlResource", fmt.Sprintf("%v", req.GetSqlResource())) } baseUrl.RawQuery = params.Encode() // Build HTTP headers from client and context metadata. hds := append(c.xGoogHeaders, "Content-Type", "application/json") headers := gax.BuildHeaders(ctx, hds...) opts = append((*c.CallOptions).LookupEntry[0:len((*c.CallOptions).LookupEntry):len((*c.CallOptions).LookupEntry)], opts...) unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true} resp := &datacatalogpb.Entry{} e := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { if settings.Path != "" { baseUrl.Path = settings.Path } httpReq, err := http.NewRequest("GET", baseUrl.String(), nil) if err != nil { return err } httpReq = httpReq.WithContext(ctx) httpReq.Header = headers buf, err := executeHTTPRequest(ctx, c.httpClient, httpReq, c.logger, nil, "LookupEntry") if err != nil { return err } if err := unm.Unmarshal(buf, resp); err != nil { return err } return nil }, opts...) if e != nil { return nil, e } return resp, nil }
0.942504
googleapis/google-cloud-go
datacatalog/apiv1beta1/data_catalog_client.go
googleapis/google-cloud-go
datacatalog/apiv1/data_catalog_client.go
Apache-2.0
go
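A hedged sketch of the LookupEntry call against the v1beta1 client above. The linked-resource name is a placeholder, and the oneof wrapper type name (LookupEntryRequest_LinkedResource) is assumed from the standard generated protobuf bindings.

package main

import (
    "context"
    "fmt"

    datacatalog "cloud.google.com/go/datacatalog/apiv1beta1"
    "cloud.google.com/go/datacatalog/apiv1beta1/datacatalogpb"
)

func main() {
    ctx := context.Background()
    c, err := datacatalog.NewClient(ctx)
    if err != nil {
        panic(err)
    }
    defer c.Close()

    // Look the entry up by the source service's resource name (oneof target).
    entry, err := c.LookupEntry(ctx, &datacatalogpb.LookupEntryRequest{
        TargetName: &datacatalogpb.LookupEntryRequest_LinkedResource{
            LinkedResource: "//bigquery.googleapis.com/projects/p/datasets/d/tables/t",
        },
    })
    if err != nil {
        panic(err)
    }
    fmt.Println("resolved entry:", entry.GetName())
}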
LookupEntry get an entry by target resource name. This method allows clients to use the resource name from the source Google Cloud Platform service to get the Data Catalog Entry. Deprecated: LookupEntry may be removed in a future version.
func (c *Client) LookupEntry(ctx context.Context, req *datacatalogpb.LookupEntryRequest, opts ...gax.CallOption) (*datacatalogpb.Entry, error) { return c.internalClient.LookupEntry(ctx, req, opts...) }
func (c *Client) DeleteEntry(ctx context.Context, req *datacatalogpb.DeleteEntryRequest, opts ...gax.CallOption) error { return c.internalClient.DeleteEntry(ctx, req, opts...) }
0.559464
googleapis/google-cloud-go
datacatalog/apiv1beta1/data_catalog_client.go
googleapis/google-cloud-go
datacatalog/apiv1beta1/data_catalog_client.go
Apache-2.0
go
LookupEntry get an entry by target resource name. This method allows clients to use the resource name from the source Google Cloud Platform service to get the Data Catalog Entry. Deprecated: LookupEntry may be removed in a future version.
func (c *Client) LookupEntry(ctx context.Context, req *datacatalogpb.LookupEntryRequest, opts ...gax.CallOption) (*datacatalogpb.Entry, error) { return c.internalClient.LookupEntry(ctx, req, opts...) }
func (c *Client) GetEntry(ctx context.Context, req *datacatalogpb.GetEntryRequest, opts ...gax.CallOption) (*datacatalogpb.Entry, error) { return c.internalClient.GetEntry(ctx, req, opts...) }
0.553578
googleapis/google-cloud-go
datacatalog/apiv1beta1/data_catalog_client.go
googleapis/google-cloud-go
datacatalog/apiv1beta1/data_catalog_client.go
Apache-2.0
go
LookupEntry get an entry by target resource name. This method allows clients to use the resource name from the source Google Cloud Platform service to get the Data Catalog Entry. Deprecated: LookupEntry may be removed in a future version.
func (c *Client) LookupEntry(ctx context.Context, req *datacatalogpb.LookupEntryRequest, opts ...gax.CallOption) (*datacatalogpb.Entry, error) { return c.internalClient.LookupEntry(ctx, req, opts...) }
func (c *Client) ByCatalogKindName(ctx context.Context, p *ByCatalogKindNamePayload) (res *Resource, err error) { var ires any ires, err = c.ByCatalogKindNameEndpoint(ctx, p) if err != nil { return } return ires.(*Resource), nil }
0.5511
googleapis/google-cloud-go
datacatalog/apiv1beta1/data_catalog_client.go
tektoncd/cli
vendor/github.com/tektoncd/hub/api/v1/gen/resource/client.go
Apache-2.0
go
LookupEntry get an entry by target resource name. This method allows clients to use the resource name from the source Google Cloud Platform service to get the Data Catalog Entry. Deprecated: LookupEntry may be removed in a future version.
func (c *Client) LookupEntry(ctx context.Context, req *datacatalogpb.LookupEntryRequest, opts ...gax.CallOption) (*datacatalogpb.Entry, error) { return c.internalClient.LookupEntry(ctx, req, opts...) }
func (c *Client) LookupKey(ctx context.Context, req *apikeyspb.LookupKeyRequest, opts ...gax.CallOption) (*apikeyspb.LookupKeyResponse, error) { return c.internalClient.LookupKey(ctx, req, opts...) }
0.54585
googleapis/google-cloud-go
datacatalog/apiv1beta1/data_catalog_client.go
googleapis/google-cloud-go
apikeys/apiv2/api_keys_client.go
Apache-2.0
go