go.mod: bump github.com/getkin/kin-openapi to v0.131.0
Because deepmap/oapi-codegen does not work with this newer version, upgrade to oapi-codegen/oapi-codegen v2. This mitigates CVE-2025-30153.
This commit is contained in:
parent
c5cb0d0618
commit
b2700903ae
403 changed files with 44758 additions and 16347 deletions
93
vendor/github.com/vmware-labs/yaml-jsonpath/pkg/yamlpath/comparison.go
generated
vendored
Normal file
93
vendor/github.com/vmware-labs/yaml-jsonpath/pkg/yamlpath/comparison.go
generated
vendored
Normal file
|
|
@ -0,0 +1,93 @@
|
|||
/*
|
||||
* Copyright 2020 VMware, Inc.
|
||||
*
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*/
|
||||
|
||||
package yamlpath
|
||||
|
||||
import "strconv"
|
||||
|
||||
// comparison is the outcome of comparing two node values.
type comparison int

const (
	compareLessThan comparison = iota
	compareEqual
	compareGreaterThan
	compareIncomparable
)

// orderingOperator is a relational operator in its textual form.
type orderingOperator string

const (
	operatorLessThan           orderingOperator = "<"
	operatorLessThanOrEqual    orderingOperator = "<="
	operatorGreaterThan        orderingOperator = ">"
	operatorGreaterThanOrEqual orderingOperator = ">="
)

// String returns the operator's textual form.
func (o orderingOperator) String() string {
	return string(o)
}

// comparator reports whether a comparison outcome satisfies an operator.
type comparator func(comparison) bool

// equal is satisfied only by an equal outcome.
func equal(c comparison) bool {
	return c == compareEqual
}

// notEqual is satisfied by any outcome other than equal, including incomparable.
func notEqual(c comparison) bool {
	return !equal(c)
}

// greaterThan is satisfied only by a greater-than outcome.
func greaterThan(c comparison) bool {
	return c == compareGreaterThan
}

// greaterThanOrEqual is satisfied by greater-than or equal outcomes.
func greaterThanOrEqual(c comparison) bool {
	return greaterThan(c) || equal(c)
}

// lessThan is satisfied only by a less-than outcome.
func lessThan(c comparison) bool {
	return c == compareLessThan
}

// lessThanOrEqual is satisfied by less-than or equal outcomes.
func lessThanOrEqual(c comparison) bool {
	return lessThan(c) || equal(c)
}

// compareStrings compares two strings: equal strings are equal, unequal
// strings are incomparable (strings have no defined ordering here).
func compareStrings(a, b string) comparison {
	if a != b {
		return compareIncomparable
	}
	return compareEqual
}

// compareFloat64 compares two floats using the usual numeric ordering.
func compareFloat64(lhs, rhs float64) comparison {
	switch {
	case lhs < rhs:
		return compareLessThan
	case lhs > rhs:
		return compareGreaterThan
	default:
		return compareEqual
	}
}
|
||||
|
||||
// compareNodeValues compares two values each of which may be a string, integer, or float.
// Two numeric values (of either numeric type) are compared numerically; otherwise both
// values must be strings and are compared for equality only. Any other combination is a
// programming error and panics.
func compareNodeValues(lhs, rhs typedValue) comparison {
	if lhs.typ.isNumeric() && rhs.typ.isNumeric() {
		// both numeric: compare as floats (values were validated during lexing)
		return compareFloat64(mustParseFloat64(lhs.val), mustParseFloat64(rhs.val))
	}
	if (lhs.typ != stringValueType && !lhs.typ.isNumeric()) || (rhs.typ != stringValueType && !rhs.typ.isNumeric()) {
		panic("invalid type of value passed to compareNodeValues") // should never happen
	}
	return compareStrings(lhs.val, rhs.val)
}
|
||||
|
||||
// mustParseFloat64 parses s as a float64, panicking on invalid input.
// Callers guarantee s is a valid numeric literal (validated during lexing).
func mustParseFloat64(s string) float64 {
	f, err := strconv.ParseFloat(s, 64)
	if err == nil {
		return f
	}
	panic("invalid numeric value " + s) // should never happen
}
|
||||
8
vendor/github.com/vmware-labs/yaml-jsonpath/pkg/yamlpath/doc.go
generated
vendored
Normal file
8
vendor/github.com/vmware-labs/yaml-jsonpath/pkg/yamlpath/doc.go
generated
vendored
Normal file
|
|
@ -0,0 +1,8 @@
|
|||
/*
|
||||
* Copyright 2020 VMware, Inc.
|
||||
*
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*/
|
||||
|
||||
// Package yamlpath provides YAML node searching using path notation.
|
||||
package yamlpath
|
||||
297
vendor/github.com/vmware-labs/yaml-jsonpath/pkg/yamlpath/filter.go
generated
vendored
Normal file
297
vendor/github.com/vmware-labs/yaml-jsonpath/pkg/yamlpath/filter.go
generated
vendored
Normal file
|
|
@ -0,0 +1,297 @@
|
|||
/*
|
||||
* Copyright 2020 VMware, Inc.
|
||||
*
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*/
|
||||
|
||||
package yamlpath
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"regexp"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"gopkg.in/yaml.v3"
|
||||
)
|
||||
|
||||
// filter is a predicate evaluated against a candidate node; root is supplied
// so that absolute ($-rooted) subpaths inside the filter can be resolved.
type filter func(node, root *yaml.Node) bool

// newFilter compiles a filter expression parse tree into a predicate.
// A nil tree (or an unrecognized node) yields a filter matching nothing.
func newFilter(n *filterNode) filter {
	if n == nil {
		return never
	}

	switch n.lexeme.typ {
	case lexemeFilterAt, lexemeRoot:
		// Existence filter: true when the subpath selects at least one node.
		path := pathFilterScanner(n)
		return func(node, root *yaml.Node) bool {
			return len(path(node, root)) > 0
		}

	case lexemeFilterEquality, lexemeFilterInequality,
		lexemeFilterGreaterThan, lexemeFilterGreaterThanOrEqual,
		lexemeFilterLessThan, lexemeFilterLessThanOrEqual:
		return comparisonFilter(n)

	case lexemeFilterMatchesRegularExpression:
		return matchRegularExpression(n)

	case lexemeFilterNot:
		// Logical negation of the single child filter.
		f := newFilter(n.children[0])
		return func(node, root *yaml.Node) bool {
			return !f(node, root)
		}

	case lexemeFilterOr:
		// Short-circuit disjunction of the two child filters.
		f1 := newFilter(n.children[0])
		f2 := newFilter(n.children[1])
		return func(node, root *yaml.Node) bool {
			return f1(node, root) || f2(node, root)
		}

	case lexemeFilterAnd:
		// Short-circuit conjunction of the two child filters.
		f1 := newFilter(n.children[0])
		f2 := newFilter(n.children[1])
		return func(node, root *yaml.Node) bool {
			return f1(node, root) && f2(node, root)
		}

	case lexemeFilterBooleanLiteral:
		// A bare true/false literal used as a whole filter.
		b, err := strconv.ParseBool(n.lexeme.val)
		if err != nil {
			panic(err) // should not happen
		}
		return func(node, root *yaml.Node) bool {
			return b
		}

	default:
		return never
	}
}

// never is the filter which matches no node.
func never(node, root *yaml.Node) bool {
	return false
}
|
||||
|
||||
// comparisonFilter compiles a comparison node (==, !=, <, <=, >, >=) into a filter.
func comparisonFilter(n *filterNode) filter {
	// compare maps a boolean equality result onto the comparison domain and
	// applies the node's comparator: equal when true, incomparable when false.
	// This makes e.g. != succeed and < fail for unequal booleans/nulls.
	compare := func(b bool) bool {
		var c comparison
		if b {
			c = compareEqual
		} else {
			c = compareIncomparable
		}
		return n.lexeme.comparator()(c)
	}
	return nodeToFilter(n, func(l, r typedValue) bool {
		if !l.typ.compatibleWith(r.typ) {
			// incompatible types compare as incomparable
			return compare(false)
		}
		switch l.typ {
		case booleanValueType:
			return compare(equalBooleans(l.val, r.val))

		case nullValueType:
			return compare(equalNulls(l.val, r.val))

		default:
			// strings and numerics support full ordering comparisons
			return n.lexeme.comparator()(compareNodeValues(l, r))
		}
	})
}
|
||||
|
||||
var x, y typedValue
|
||||
|
||||
func init() {
|
||||
x = typedValue{stringValueType, "x"}
|
||||
y = typedValue{stringValueType, "y"}
|
||||
}
|
||||
|
||||
// nodeToFilter builds a filter that applies accept to every pair of values
// drawn from the comparison node's left- and right-hand operand paths.
func nodeToFilter(n *filterNode, accept func(typedValue, typedValue) bool) filter {
	lhsPath := newFilterScanner(n.children[0])
	rhsPath := newFilterScanner(n.children[1])
	return func(node, root *yaml.Node) (result bool) {
		// perform a set-wise comparison of the values in each path:
		// fail as soon as any pair is rejected; otherwise succeed iff at
		// least one pair was compared (empty operands never match).
		match := false
		for _, l := range lhsPath(node, root) {
			for _, r := range rhsPath(node, root) {
				if !accept(l, r) {
					return false
				}
				match = true
			}
		}
		return match
	}
}
|
||||
|
||||
// equalBooleans reports whether two boolean literals denote the same value,
// ignoring case (e.g. "True" equals "true").
func equalBooleans(a, b string) bool {
	// The YAML parser and the JSONPath lexer both rule out invalid boolean
	// literals such as tRue, so a case-insensitive comparison is sufficient.
	return strings.EqualFold(a, b)
}
|
||||
|
||||
// equalNulls reports whether two null literals denote the same value.
// The YAML parser and the JSONPath lexer both rule out invalid null literals
// such as nUll, so any two null literals are necessarily equal.
func equalNulls(a, b string) bool {
	return true
}
|
||||
|
||||
// filterScanner is a function that returns a slice of typed values from either a filter literal or a path expression
// which refers to either the current node or the root node. It is used in filter comparisons.
type filterScanner func(node, root *yaml.Node) []typedValue

// emptyScanner produces no values, regardless of input.
func emptyScanner(*yaml.Node, *yaml.Node) []typedValue {
	return []typedValue{}
}
|
||||
|
||||
func newFilterScanner(n *filterNode) filterScanner {
|
||||
switch {
|
||||
case n == nil:
|
||||
return emptyScanner
|
||||
|
||||
case n.isItemFilter():
|
||||
return pathFilterScanner(n)
|
||||
|
||||
case n.isLiteral():
|
||||
return literalFilterScanner(n)
|
||||
|
||||
default:
|
||||
return emptyScanner
|
||||
}
|
||||
}
|
||||
|
||||
// pathFilterScanner compiles a filter subpath (rooted at @ or $) into a
// scanner producing the typed values of the nodes the subpath selects.
// A subpath that fails to compile scans nothing.
func pathFilterScanner(n *filterNode) filterScanner {
	var at bool
	switch n.lexeme.typ {
	case lexemeFilterAt:
		at = true // subpath is relative to the current node
	case lexemeRoot:
		at = false // subpath is relative to the document root
	default:
		panic("false precondition")
	}
	// reassemble the subpath's lexemes into a path expression string
	subpath := ""
	for _, lexeme := range n.subpath {
		subpath += lexeme.val
	}
	path, err := NewPath(subpath)
	if err != nil {
		return emptyScanner
	}
	return func(node, root *yaml.Node) []typedValue {
		if at {
			return values(path.Find(node))
		}
		return values(path.Find(root))
	}
}
|
||||
|
||||
// valueType classifies a scalar value for filter comparisons.
type valueType int

const (
	unknownValueType valueType = iota
	stringValueType
	intValueType
	floatValueType
	booleanValueType
	nullValueType
	regularExpressionValueType
)

// isNumeric reports whether the type is an integer or float.
func (vt valueType) isNumeric() bool {
	switch vt {
	case intValueType, floatValueType:
		return true
	default:
		return false
	}
}

// compatibleWith reports whether values of the two types may be compared:
// identical types, any two numeric types, or a string against a regular
// expression (for =~ matches).
func (vt valueType) compatibleWith(vt2 valueType) bool {
	if vt == vt2 {
		return true
	}
	if vt.isNumeric() && vt2.isNumeric() {
		return true
	}
	return vt == stringValueType && vt2 == regularExpressionValueType
}
|
||||
|
||||
// typedValue is a scalar value tagged with its YAML-derived type.
type typedValue struct {
	typ valueType // the value's type, or unknownValueType
	val string    // the raw scalar text
}

// Short forms of the standard YAML scalar tags, as returned by yaml.Node.ShortTag.
const (
	nullTag  = "!!null"
	boolTag  = "!!bool"
	strTag   = "!!str"
	intTag   = "!!int"
	floatTag = "!!float"
)
|
||||
|
||||
// typedValueOfNode converts a YAML node into a typedValue, deriving the value
// type from the node's resolved short tag. Non-scalar nodes, and scalars with
// unrecognized tags, yield unknownValueType.
func typedValueOfNode(node *yaml.Node) typedValue {
	var t valueType = unknownValueType
	if node.Kind == yaml.ScalarNode {
		switch node.ShortTag() {
		case nullTag:
			t = nullValueType

		case boolTag:
			t = booleanValueType

		case strTag:
			t = stringValueType

		case intTag:
			t = intValueType

		case floatTag:
			t = floatValueType
		}
	}

	return typedValue{
		typ: t,
		val: node.Value,
	}
}
|
||||
|
||||
func newTypedValue(t valueType, v string) typedValue {
|
||||
return typedValue{
|
||||
typ: t,
|
||||
val: v,
|
||||
}
|
||||
}
|
||||
|
||||
func typedValueOfString(s string) typedValue {
|
||||
return newTypedValue(stringValueType, s)
|
||||
}
|
||||
|
||||
func typedValueOfInt(i string) typedValue {
|
||||
return newTypedValue(intValueType, i)
|
||||
}
|
||||
|
||||
func typedValueOfFloat(f string) typedValue {
|
||||
return newTypedValue(floatValueType, f)
|
||||
}
|
||||
|
||||
// values converts the result of Path.Find into typed values. Taking the error
// as a second parameter allows direct chaining of Find, which currently
// cannot fail.
func values(nodes []*yaml.Node, err error) []typedValue {
	if err != nil {
		panic(fmt.Errorf("unexpected error: %v", err)) // should never happen
	}
	v := []typedValue{}
	for _, n := range nodes {
		v = append(v, typedValueOfNode(n))
	}
	return v
}
|
||||
|
||||
// literalFilterScanner compiles a literal operand into a scanner producing
// that single value for any input. The value is captured once, up front.
func literalFilterScanner(n *filterNode) filterScanner {
	v := n.lexeme.literalValue()
	return func(node, root *yaml.Node) []typedValue {
		return []typedValue{v}
	}
}
|
||||
|
||||
// matchRegularExpression compiles an =~ node into a filter.
func matchRegularExpression(parseTree *filterNode) filter {
	return nodeToFilter(parseTree, stringMatchesRegularExpression)
}

// stringMatchesRegularExpression reports whether string value s matches the
// regular expression literal expr; any other type combination never matches.
func stringMatchesRegularExpression(s, expr typedValue) bool {
	if s.typ != stringValueType || expr.typ != regularExpressionValueType {
		return false // can't compare types so return false
	}
	// NOTE(review): the Compile error is discarded on the assumption that the
	// lexer already validated the pattern; if that guarantee ever failed, re
	// would be nil and Match would panic — confirm the lexer invariant holds.
	re, _ := regexp.Compile(expr.val) // regex already compiled during lexing
	return re.Match([]byte(s.val))
}
|
||||
295
vendor/github.com/vmware-labs/yaml-jsonpath/pkg/yamlpath/filter_parser.go
generated
vendored
Normal file
295
vendor/github.com/vmware-labs/yaml-jsonpath/pkg/yamlpath/filter_parser.go
generated
vendored
Normal file
|
|
@ -0,0 +1,295 @@
|
|||
/*
|
||||
* Copyright 2020 VMware, Inc.
|
||||
*
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*/
|
||||
|
||||
package yamlpath
|
||||
|
||||
/*
filterNode represents a node of a filter expression parse tree. Each node is labelled with a lexeme.

Terminal nodes have one of the following lexemes: root, lexemeFilterAt, lexemeFilterIntegerLiteral,
lexemeFilterFloatLiteral, lexemeFilterStringLiteral, lexemeFilterBooleanLiteral.
root and lexemeFilterAt nodes also have a slice of lexemes representing the subpath of `$` or `@`,
respectively.

Non-terminal nodes represent either basic filters (simpler predicates of one or two terminal
nodes) or filter expressions (more complex predicates of basic filters). A filter existence expression
is represented as a terminal node with lexemeFilterAt or (less commonly) root.

The following examples illustrate the approach.

The basic filter `@.child > 3` is represented as the following parse tree (where each node is indicated by
its lexeme and `<...>` represents the node's children):

lexemeFilterGreaterThan<lexemeFilterAt,lexemeFilterIntegerLiteral>

or, graphically:

       >
      / \
@.child   3

The filter expression `@.child > 3 && @.other` is represented as the parse tree:

lexemeFilterConjunction<lexemeFilterGreaterThan<lexemeFilterAt,lexemeFilterIntegerLiteral>,lexemeFilterAt>

or, graphically:

           &&
          /  \
         >    @.other
        / \
 @.child   3

The filter expression `(@.child < 5 || @.child > 10) && @.other == 'x'` is represented as the parse tree:

lexemeFilterConjunction<lexemeFilterDisjunction<lexemeFilterLessThan<lexemeFilterAt,lexemeFilterIntegerLiteral>,
                                                lexemeFilterGreaterThan<lexemeFilterAt,lexemeFilterIntegerLiteral>
                                               >,
                        lexemeFilterEquality<lexemeFilterAt,lexemeFilterStringLiteral>
                       >

or, graphically:

                  &&
            /           \
          ||             ==
        /    \          /  \
       <      >   @.other   'x'
      / \    / \
@.child  5  @.child  10

Note that brackets do not appear in the parse tree.
*/
type filterNode struct {
	lexeme   lexeme        // the label of this node
	subpath  []lexeme      // empty unless lexeme is root or lexemeFilterAt
	children []*filterNode // operands; empty for terminal nodes
}
|
||||
|
||||
// newFilterNode parses a slice of filter lexemes into a parse tree.
func newFilterNode(lexemes []lexeme) *filterNode {
	return newParser(lexemes).parse()
}
|
||||
|
||||
func (n *filterNode) isItemFilter() bool {
|
||||
return n.lexeme.typ == lexemeFilterAt || n.lexeme.typ == lexemeRoot
|
||||
}
|
||||
|
||||
func (n *filterNode) isLiteral() bool {
|
||||
return n.isStringLiteral() || n.isBooleanLiteral() || n.isNullLiteral() || n.isNumericLiteral() || n.isRegularExpressionLiteral()
|
||||
}
|
||||
|
||||
func (n *filterNode) isStringLiteral() bool {
|
||||
return n.lexeme.typ == lexemeFilterStringLiteral
|
||||
}
|
||||
|
||||
func (n *filterNode) isBooleanLiteral() bool {
|
||||
return n.lexeme.typ == lexemeFilterBooleanLiteral
|
||||
}
|
||||
|
||||
func (n *filterNode) isNullLiteral() bool {
|
||||
return n.lexeme.typ == lexemeFilterNullLiteral
|
||||
}
|
||||
|
||||
func (n *filterNode) isNumericLiteral() bool {
|
||||
return n.lexeme.typ == lexemeFilterFloatLiteral || n.lexeme.typ == lexemeFilterIntegerLiteral
|
||||
}
|
||||
|
||||
func (n *filterNode) isRegularExpressionLiteral() bool {
|
||||
return n.lexeme.typ == lexemeFilterRegularExpressionLiteral
|
||||
}
|
||||
|
||||
// parser holds the state of the filter expression parser.
type parser struct {
	input []lexeme      // the lexemes being scanned
	pos   int           // current position in the input
	stack []*filterNode // parser stack
	tree  *filterNode   // parse tree
}

// newParser creates a new parser for the input slice of lexemes.
func newParser(input []lexeme) *parser {
	l := &parser{
		input: input,
		stack: make([]*filterNode, 0),
	}
	return l
}
|
||||
|
||||
// push pushes a parse tree on the stack.
|
||||
func (p *parser) push(tree *filterNode) {
|
||||
p.stack = append(p.stack, tree)
|
||||
}
|
||||
|
||||
// pop pops a parse tree from the stack, which must be non-empty.
|
||||
func (p *parser) pop() *filterNode {
|
||||
index := len(p.stack) - 1
|
||||
element := p.stack[index]
|
||||
p.stack = p.stack[:index]
|
||||
return element
|
||||
}
|
||||
|
||||
// nextLexeme returns the next item from the input.
|
||||
// The caller must peek to ensure there is more input before calling nextLexeme.
|
||||
func (p *parser) nextLexeme() lexeme {
|
||||
next := p.input[p.pos]
|
||||
p.pos++
|
||||
return next
|
||||
}
|
||||
|
||||
// peek returns the next item from the input without consuming the item.
|
||||
func (p *parser) peek() lexeme {
|
||||
if p.pos >= len(p.input) {
|
||||
return lexeme{lexemeEOF, ""}
|
||||
}
|
||||
return p.input[p.pos]
|
||||
}
|
||||
|
||||
// parse runs the parser and returns the resulting parse tree, or nil for
// empty input.
func (p *parser) parse() *filterNode {
	if p.peek().typ == lexemeEOF {
		return nil
	}
	p.expression()
	return p.tree
}

// expression parses a disjunction of conjunctions (|| binds loosest).
func (p *parser) expression() {
	p.conjunction()
	for p.peek().typ == lexemeFilterOr {
		p.push(p.tree) // save the left operand across the recursive parse
		p.or()
	}
}

// or consumes a || lexeme and its right operand, combining it with the
// previously pushed left operand.
func (p *parser) or() {
	n := p.nextLexeme()
	p.conjunction()
	p.tree = &filterNode{
		lexeme:  n,
		subpath: []lexeme{},
		children: []*filterNode{
			p.pop(), // left operand
			p.tree,  // right operand
		},
	}
}

// conjunction parses a sequence of basic filters joined by && (binds tighter
// than ||).
func (p *parser) conjunction() {
	p.basicFilter()
	for p.peek().typ == lexemeFilterAnd {
		p.push(p.tree) // save the left operand across the recursive parse
		p.and()
	}
}

// and consumes a && lexeme and its right operand, combining it with the
// previously pushed left operand.
func (p *parser) and() {
	n := p.nextLexeme()
	p.basicFilter()
	p.tree = &filterNode{
		lexeme:  n,
		subpath: []lexeme{},
		children: []*filterNode{
			p.pop(), // left operand
			p.tree,  // right operand
		},
	}
}
|
||||
|
||||
// basicFilter consumes the next basic filter and sets it as the parser's tree. If a basic filter is not next, nil is set.
func (p *parser) basicFilter() {
	n := p.peek()
	switch n.typ {
	case lexemeFilterNot:
		// negation: parse the operand and wrap it in a ! node
		p.nextLexeme()
		p.basicFilter()
		p.tree = &filterNode{
			lexeme:  n,
			subpath: []lexeme{},
			children: []*filterNode{
				p.tree,
			},
		}
		return

	case lexemeFilterOpenBracket:
		// parenthesized subexpression; brackets do not appear in the tree
		p.nextLexeme()
		p.expression()
		if p.peek().typ == lexemeFilterCloseBracket {
			p.nextLexeme()
		}
		return
	}

	p.filterTerm()
	n = p.peek()
	if n.typ.isComparisonOrMatch() {
		// binary comparison or =~ match: combine the term just parsed with
		// the term following the operator
		p.nextLexeme()
		filterTerm := p.tree
		p.filterTerm()
		p.tree = &filterNode{
			lexeme:  n,
			subpath: []lexeme{},
			children: []*filterNode{
				filterTerm, // left operand
				p.tree,     // right operand
			},
		}
	}
}
|
||||
|
||||
// filterTerm consumes the next filter term and sets it as the parser's tree. If a filter term is not next, nil is set.
func (p *parser) filterTerm() {
	n := p.peek()
	switch n.typ {
	case lexemeEOF, lexemeError:
		p.tree = nil

	case lexemeFilterAt, lexemeRoot:
		// a path term: collect its subpath lexemes up to the end of this filter
		p.nextLexeme()
		subpath := []lexeme{}
		filterNestingLevel := 1
	f:
		for {
			s := p.peek()
			switch s.typ {
			case lexemeIdentity, lexemeDotChild, lexemeBracketChild, lexemeRecursiveDescent, lexemeArraySubscript:
				// subpath continues

			case lexemeFilterBegin:
				filterNestingLevel++

			case lexemeFilterEnd:
				filterNestingLevel--
				if filterNestingLevel == 0 {
					// end of the filter this term belongs to
					break f
				}

			case lexemeEOF:
				break f

			default:
				// allow any other lexemes only in a nested filter
				if filterNestingLevel == 1 {
					break f
				}
			}
			subpath = append(subpath, s)
			p.nextLexeme()
		}
		p.tree = &filterNode{
			lexeme:   n,
			subpath:  subpath,
			children: []*filterNode{},
		}

	case lexemeFilterIntegerLiteral, lexemeFilterFloatLiteral, lexemeFilterStringLiteral, lexemeFilterBooleanLiteral,
		lexemeFilterNullLiteral, lexemeFilterRegularExpressionLiteral:
		// a literal term
		p.nextLexeme()
		p.tree = &filterNode{
			lexeme:   n,
			subpath:  []lexeme{},
			children: []*filterNode{},
		}
	}
}
|
||||
1011
vendor/github.com/vmware-labs/yaml-jsonpath/pkg/yamlpath/lexer.go
generated
vendored
Normal file
1011
vendor/github.com/vmware-labs/yaml-jsonpath/pkg/yamlpath/lexer.go
generated
vendored
Normal file
File diff suppressed because it is too large
Load diff
463
vendor/github.com/vmware-labs/yaml-jsonpath/pkg/yamlpath/path.go
generated
vendored
Normal file
463
vendor/github.com/vmware-labs/yaml-jsonpath/pkg/yamlpath/path.go
generated
vendored
Normal file
|
|
@ -0,0 +1,463 @@
|
|||
/*
|
||||
* Copyright 2020 VMware, Inc.
|
||||
*
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*/
|
||||
|
||||
package yamlpath
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"strings"
|
||||
"unicode/utf8"
|
||||
|
||||
"github.com/dprotaso/go-yit"
|
||||
"gopkg.in/yaml.v3"
|
||||
)
|
||||
|
||||
// Path is a compiled YAML path expression.
type Path struct {
	// f produces the nodes matched relative to node, resolving any $-rooted
	// subexpressions against root.
	f func(node, root *yaml.Node) yit.Iterator
}

// Find applies the Path to a YAML node and returns the addresses of the subnodes which match the Path.
func (p *Path) Find(node *yaml.Node) ([]*yaml.Node, error) {
	return p.find(node, node), nil // currently, errors are not possible
}

// find evaluates the path with an explicit root node for resolving absolute subpaths.
func (p *Path) find(node, root *yaml.Node) []*yaml.Node {
	return p.f(node, root).ToArray()
}
|
||||
|
||||
// NewPath constructs a Path from a string expression.
func NewPath(path string) (*Path, error) {
	return newPath(lex("Path lexer", path))
}
|
||||
|
||||
// newPath recursively compiles the lexemes produced by l into a Path.
// Each case consumes one leading lexeme, compiles the remainder of the
// expression as a subpath, and combines the two.
func newPath(l *lexer) (*Path, error) {
	lx := l.nextLexeme()

	switch lx.typ {

	case lexemeError:
		return nil, errors.New(lx.val)

	case lexemeIdentity, lexemeEOF:
		// end of expression: match the current node itself
		return new(identity), nil

	case lexemeRoot:
		subPath, err := newPath(l)
		if err != nil {
			return nil, err
		}
		return new(func(node, root *yaml.Node) yit.Iterator {
			// unwrap a document node to its single content node
			if node.Kind == yaml.DocumentNode {
				node = node.Content[0]
			}
			return compose(yit.FromNode(node), subPath, root)
		}), nil

	case lexemeRecursiveDescent:
		subPath, err := newPath(l)
		if err != nil {
			return nil, err
		}
		childName := strings.TrimPrefix(lx.val, "..")
		switch childName {
		case "*":
			// includes all nodes, not just mapping nodes
			return new(func(node, root *yaml.Node) yit.Iterator {
				return compose(yit.FromNode(node).RecurseNodes(), allChildrenThen(subPath), root)
			}), nil

		case "":
			// bare "..": apply the subpath at every recursed node
			return new(func(node, root *yaml.Node) yit.Iterator {
				return compose(yit.FromNode(node).RecurseNodes(), subPath, root)
			}), nil

		default:
			// "..name": match the named child at every recursed node
			return new(func(node, root *yaml.Node) yit.Iterator {
				return compose(yit.FromNode(node).RecurseNodes(), childThen(childName, subPath), root)
			}), nil
		}

	case lexemeDotChild:
		subPath, err := newPath(l)
		if err != nil {
			return nil, err
		}
		childName := strings.TrimPrefix(lx.val, ".")

		return childThen(childName, subPath), nil

	case lexemeUndottedChild:
		subPath, err := newPath(l)
		if err != nil {
			return nil, err
		}

		return childThen(lx.val, subPath), nil

	case lexemeBracketChild:
		subPath, err := newPath(l)
		if err != nil {
			return nil, err
		}
		// strip the surrounding brackets and whitespace from the name list
		childNames := strings.TrimSpace(lx.val)
		childNames = strings.TrimSuffix(strings.TrimPrefix(childNames, "["), "]")
		childNames = strings.TrimSpace(childNames)
		return bracketChildThen(childNames, subPath), nil

	case lexemeArraySubscript:
		subPath, err := newPath(l)
		if err != nil {
			return nil, err
		}
		subscript := strings.TrimSuffix(strings.TrimPrefix(lx.val, "["), "]")
		return arraySubscriptThen(subscript, subPath), nil

	case lexemeFilterBegin, lexemeRecursiveFilterBegin:
		var recursive bool

		if lx.typ == lexemeRecursiveFilterBegin {
			recursive = true
		}
		// collect the filter's lexemes up to its matching end, tracking nesting
		filterLexemes := []lexeme{}
		filterNestingLevel := 1
	f:
		for {
			lx := l.nextLexeme()
			switch lx.typ {
			case lexemeFilterBegin:
				filterNestingLevel++
			case lexemeFilterEnd:
				filterNestingLevel--
				if filterNestingLevel == 0 {
					break f
				}
			case lexemeError:
				return nil, errors.New(lx.val)

			case lexemeEOF:
				// should never happen as lexer should have detected an error
				return nil, errors.New("missing end of filter")
			}
			filterLexemes = append(filterLexemes, lx)
		}

		subPath, err := newPath(l)
		if err != nil {
			return nil, err
		}
		if recursive {
			return recursiveFilterThen(filterLexemes, subPath), nil
		}
		return filterThen(filterLexemes, subPath), nil
	case lexemePropertyName:
		// ".name~": select the key node rather than the value node
		subPath, err := newPath(l)
		if err != nil {
			return nil, err
		}
		childName := strings.TrimPrefix(lx.val, ".")
		childName = strings.TrimSuffix(childName, propertyName)
		return propertyNameChildThen(childName, subPath), nil
	case lexemeBracketPropertyName:
		subPath, err := newPath(l)
		if err != nil {
			return nil, err
		}
		childNames := strings.TrimSpace(lx.val)
		childNames = strings.TrimSuffix(childNames, propertyName)
		childNames = strings.TrimSuffix(strings.TrimPrefix(childNames, "["), "]")
		childNames = strings.TrimSpace(childNames)
		return propertyNameBracketChildThen(childNames, subPath), nil
	case lexemeArraySubscriptPropertyName:
		subPath, err := newPath(l)
		if err != nil {
			return nil, err
		}
		subscript := strings.TrimSuffix(strings.TrimPrefix(lx.val, "["), "]~")
		return propertyNameArraySubscriptThen(subscript, subPath), nil
	}

	return nil, errors.New("invalid path syntax")
}
|
||||
|
||||
// identity matches the node itself, or nothing for the zero node.
func identity(node, root *yaml.Node) yit.Iterator {
	if node.Kind == 0 {
		return yit.FromNodes()
	}
	return yit.FromNode(node)
}

// empty matches no nodes.
func empty(node, root *yaml.Node) yit.Iterator {
	return yit.FromNodes()
}

// compose applies p to each node produced by i and concatenates the results.
func compose(i yit.Iterator, p *Path, root *yaml.Node) yit.Iterator {
	its := []yit.Iterator{}
	for a, ok := i(); ok; a, ok = i() {
		its = append(its, p.f(a, root))
	}
	return yit.FromIterators(its...)
}

// new wraps an iterator function in a Path.
// NOTE(review): this shadows the built-in new within this package.
func new(f func(node, root *yaml.Node) yit.Iterator) *Path {
	return &Path{f: f}
}
|
||||
|
||||
// propertyNameChildThen matches the KEY node (not the value) of the mapping
// entry named childName, then applies p to it.
func propertyNameChildThen(childName string, p *Path) *Path {
	childName = unescape(childName)

	return new(func(node, root *yaml.Node) yit.Iterator {
		if node.Kind != yaml.MappingNode {
			return empty(node, root)
		}
		// mapping Content alternates key, value; keys are at even indices
		for i, n := range node.Content {
			if i%2 == 0 && n.Value == childName {
				// Content[i] is the key node itself
				return compose(yit.FromNode(node.Content[i]), p, root)
			}
		}
		return empty(node, root)
	})
}
|
||||
|
||||
// propertyNameBracketChildThen matches the KEY nodes of the mapping entries
// listed in childNames (a bracketed, comma-separated list), then applies p.
func propertyNameBracketChildThen(childNames string, p *Path) *Path {
	unquotedChildren := bracketChildNames(childNames)

	return new(func(node, root *yaml.Node) yit.Iterator {
		if node.Kind != yaml.MappingNode {
			return empty(node, root)
		}
		its := []yit.Iterator{}
		for _, childName := range unquotedChildren {
			// keys are at even indices of a mapping node's Content
			for i, n := range node.Content {
				if i%2 == 0 && n.Value == childName {
					its = append(its, yit.FromNode(node.Content[i]))
				}
			}
		}
		return compose(yit.FromIterators(its...), p, root)
	})
}
|
||||
|
||||
// propertyNameArraySubscriptThen handles the [*]~ form: it matches every KEY
// node of a mapping, then applies p. Any other node kind or subscript matches
// nothing.
func propertyNameArraySubscriptThen(subscript string, p *Path) *Path {
	return new(func(node, root *yaml.Node) yit.Iterator {
		if node.Kind == yaml.MappingNode && subscript == "*" {
			its := []yit.Iterator{}
			for i, n := range node.Content {
				if i%2 != 0 {
					continue // skip child values
				}
				its = append(its, compose(yit.FromNode(n), p, root))
			}
			return yit.FromIterators(its...)
		}
		return empty(node, root)
	})
}
|
||||
|
||||
// childThen matches the VALUE of the mapping entry named childName, then
// applies p to it. "*" matches all children.
func childThen(childName string, p *Path) *Path {
	if childName == "*" {
		return allChildrenThen(p)
	}
	childName = unescape(childName)

	return new(func(node, root *yaml.Node) yit.Iterator {
		if node.Kind != yaml.MappingNode {
			return empty(node, root)
		}
		// mapping Content alternates key, value; keys are at even indices
		for i, n := range node.Content {
			if i%2 == 0 && n.Value == childName {
				// Content[i+1] is the value paired with the matched key
				return compose(yit.FromNode(node.Content[i+1]), p, root)
			}
		}
		return empty(node, root)
	})
}
|
||||
|
||||
// bracketChildNames splits a bracketed child list (e.g. `'a','b,c'`) into
// individual, unquoted, unescaped child names. Commas embedded inside quoted
// names are preserved.
func bracketChildNames(childNames string) []string {
	s := strings.Split(childNames, ",")
	// reconstitute child names with embedded commas
	children := []string{}
	accum := ""
	for _, c := range s {
		if balanced(c, '\'') && balanced(c, '"') {
			if accum != "" {
				// still inside a quoted name: keep accumulating
				accum += "," + c
			} else {
				children = append(children, c)
				accum = ""
			}
		} else {
			if accum == "" {
				// an unbalanced quote opens a name
				accum = c
			} else {
				// an unbalanced quote closes the name
				accum += "," + c
				children = append(children, accum)
				accum = ""
			}
		}
	}
	if accum != "" {
		children = append(children, accum)
	}

	// strip surrounding quotes and process escapes
	unquotedChildren := []string{}
	for _, c := range children {
		c = strings.TrimSpace(c)
		if strings.HasPrefix(c, "'") {
			c = strings.TrimSuffix(strings.TrimPrefix(c, "'"), "'")
		} else {
			c = strings.TrimSuffix(strings.TrimPrefix(c, `"`), `"`)
		}
		c = unescape(c)
		unquotedChildren = append(unquotedChildren, c)
	}
	return unquotedChildren
}
|
||||
|
||||
// balanced reports whether the quote rune q occurs an even number of times in
// c, ignoring backslash-escaped occurrences.
func balanced(c string, q rune) bool {
	bal := true
	prev := eof
	for i := 0; i < len(c); {
		rune, width := utf8.DecodeRuneInString(c[i:])
		i += width
		if rune == q {
			if i > 0 && prev == '\\' {
				// escaped quote: does not affect the balance
				prev = rune
				continue
			}
			bal = !bal
		}
		prev = rune
	}
	return bal
}
|
||||
|
||||
// bracketChildThen matches the VALUES of the mapping entries listed in
// childNames (a bracketed, comma-separated list), then applies p to each.
func bracketChildThen(childNames string, p *Path) *Path {
	unquotedChildren := bracketChildNames(childNames)

	return new(func(node, root *yaml.Node) yit.Iterator {
		if node.Kind != yaml.MappingNode {
			return empty(node, root)
		}
		its := []yit.Iterator{}
		for _, childName := range unquotedChildren {
			// keys are at even indices; Content[i+1] is the paired value
			for i, n := range node.Content {
				if i%2 == 0 && n.Value == childName {
					its = append(its, yit.FromNode(node.Content[i+1]))
				}
			}
		}
		return compose(yit.FromIterators(its...), p, root)
	})
}
|
||||
|
||||
// unescape removes backslash escapes from raw: `\\` becomes `\` and a
// backslash before any other rune is dropped, leaving the rune itself.
func unescape(raw string) string {
	var b strings.Builder
	escaped := false
	for _, r := range raw {
		if r == '\\' {
			if escaped {
				// second backslash of an escaped pair: emit one backslash
				b.WriteRune(r)
			}
			escaped = !escaped
			continue
		}
		escaped = false
		b.WriteRune(r)
	}

	return b.String()
}
|
||||
|
||||
func allChildrenThen(p *Path) *Path {
|
||||
return new(func(node, root *yaml.Node) yit.Iterator {
|
||||
switch node.Kind {
|
||||
case yaml.MappingNode:
|
||||
its := []yit.Iterator{}
|
||||
for i, n := range node.Content {
|
||||
if i%2 == 0 {
|
||||
continue // skip child names
|
||||
}
|
||||
its = append(its, compose(yit.FromNode(n), p, root))
|
||||
}
|
||||
return yit.FromIterators(its...)
|
||||
|
||||
case yaml.SequenceNode:
|
||||
its := []yit.Iterator{}
|
||||
for i := 0; i < len(node.Content); i++ {
|
||||
its = append(its, compose(yit.FromNode(node.Content[i]), p, root))
|
||||
}
|
||||
return yit.FromIterators(its...)
|
||||
|
||||
default:
|
||||
return empty(node, root)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func arraySubscriptThen(subscript string, p *Path) *Path {
|
||||
return new(func(node, root *yaml.Node) yit.Iterator {
|
||||
if node.Kind == yaml.MappingNode && subscript == "*" {
|
||||
its := []yit.Iterator{}
|
||||
for i, n := range node.Content {
|
||||
if i%2 == 0 {
|
||||
continue // skip child names
|
||||
}
|
||||
its = append(its, compose(yit.FromNode(n), p, root))
|
||||
}
|
||||
return yit.FromIterators(its...)
|
||||
}
|
||||
if node.Kind != yaml.SequenceNode {
|
||||
return empty(node, root)
|
||||
}
|
||||
|
||||
slice, err := slice(subscript, len(node.Content))
|
||||
if err != nil {
|
||||
panic(err) // should not happen, lexer should have detected errors
|
||||
}
|
||||
|
||||
its := []yit.Iterator{}
|
||||
for _, s := range slice {
|
||||
if s >= 0 && s < len(node.Content) {
|
||||
its = append(its, compose(yit.FromNode(node.Content[s]), p, root))
|
||||
}
|
||||
}
|
||||
return yit.FromIterators(its...)
|
||||
})
|
||||
}
|
||||
|
||||
func filterThen(filterLexemes []lexeme, p *Path) *Path {
|
||||
filter := newFilter(newFilterNode(filterLexemes))
|
||||
return new(func(node, root *yaml.Node) yit.Iterator {
|
||||
its := []yit.Iterator{}
|
||||
if node.Kind == yaml.SequenceNode {
|
||||
for _, c := range node.Content {
|
||||
if filter(c, root) {
|
||||
its = append(its, compose(yit.FromNode(c), p, root))
|
||||
}
|
||||
}
|
||||
} else {
|
||||
if filter(node, root) {
|
||||
its = append(its, compose(yit.FromNode(node), p, root))
|
||||
}
|
||||
}
|
||||
return yit.FromIterators(its...)
|
||||
})
|
||||
}
|
||||
|
||||
func recursiveFilterThen(filterLexemes []lexeme, p *Path) *Path {
|
||||
filter := newFilter(newFilterNode(filterLexemes))
|
||||
return new(func(node, root *yaml.Node) yit.Iterator {
|
||||
its := []yit.Iterator{}
|
||||
|
||||
if filter(node, root) {
|
||||
its = append(its, compose(yit.FromNode(node), p, root))
|
||||
}
|
||||
return yit.FromIterators(its...)
|
||||
})
|
||||
}
|
||||
143
vendor/github.com/vmware-labs/yaml-jsonpath/pkg/yamlpath/slicer.go
generated
vendored
Normal file
143
vendor/github.com/vmware-labs/yaml-jsonpath/pkg/yamlpath/slicer.go
generated
vendored
Normal file
|
|
@ -0,0 +1,143 @@
|
|||
/*
|
||||
* Copyright 2020 VMware, Inc.
|
||||
*
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*/
|
||||
|
||||
package yamlpath
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// slice parses the given array index expression and returns the slice of
// indices, each in the range [0, length), that it denotes. Supported
// forms are "*" (all indices), a single (possibly negative) index, a
// Python-style "from:to:step" slice with any component omitted, and a
// comma-separated union of any of these. An error is returned for
// malformed expressions. Errors from union members are wrapped with %w
// so callers can inspect them with errors.Is/As.
func slice(index string, length int) ([]int, error) {
	// A union combines the indices of each member, in order, with
	// duplicates retained.
	if union := strings.Split(index, ","); len(union) > 1 {
		combination := []int{}
		for i, idx := range union {
			sl, err := slice(idx, length)
			if err != nil {
				return nil, fmt.Errorf("error in union member %d: %w", i, err)
			}
			combination = append(combination, sl...)
		}
		return combination, nil
	}

	index = strings.TrimSpace(index)

	if index == "*" {
		return indices(0, length, 1, length), nil
	}

	subscr := strings.Split(index, ":")
	if len(subscr) > 3 {
		return nil, errors.New("malformed array index, too many colons")
	}
	// subscript records whether each of from, to, and step was supplied,
	// since an absent component defaults differently from a zero value.
	type subscript struct {
		present bool
		value   int
	}
	subscripts := []subscript{{false, 0}, {false, 0}, {false, 0}}
	const (
		sFrom = iota
		sTo
		sStep
	)
	for i, s := range subscr {
		s = strings.TrimSpace(s)
		if s != "" {
			n, err := strconv.Atoi(s)
			if err != nil {
				return nil, errors.New("non-integer array index")
			}
			subscripts[i] = subscript{
				present: true,
				value:   n,
			}
		}
	}

	// pick out the case of a single subscript first since the "to" value
	// needs special-casing
	if len(subscr) == 1 {
		if !subscripts[sFrom].present {
			return nil, errors.New("array index missing")
		}
		from := subscripts[sFrom].value
		if from < 0 {
			from += length // negative index counts back from the end
		}
		return indices(from, from+1, 1, length), nil
	}

	var from, to, step int

	if subscripts[sStep].present {
		step = subscripts[sStep].value
		if step == 0 {
			return nil, errors.New("array index step value must be non-zero")
		}
	} else {
		step = 1
	}

	// Defaults for from/to depend on the direction of traversal.
	if subscripts[sFrom].present {
		from = subscripts[sFrom].value
		if from < 0 {
			from += length
		}
	} else if step > 0 {
		from = 0
	} else {
		from = length - 1
	}

	if subscripts[sTo].present {
		to = subscripts[sTo].value
		if to < 0 {
			to += length
		}
	} else if step > 0 {
		to = length
	} else {
		to = -1 // exclusive bound one before index 0, for negative steps
	}

	return indices(from, to, step, length), nil
}

// indices returns the indices of the slice from:to:step (to exclusive)
// clamped to the valid range [0, length). A zero step yields an empty
// result. The clamping bounds the loop so hostile inputs cannot force
// excessive iteration.
func indices(from, to, step, length int) []int {
	slice := []int{}
	if step > 0 {
		if from < 0 {
			from = 0 // avoid CPU attack
		}
		if to > length {
			to = length // avoid CPU attack
		}
		for i := from; i < to; i += step {
			if 0 <= i && i < length {
				slice = append(slice, i)
			}
		}
	} else if step < 0 {
		if from > length {
			from = length // avoid CPU attack
		}
		if to < -1 {
			to = -1 // avoid CPU attack
		}
		for i := from; i > to; i += step {
			if 0 <= i && i < length {
				slice = append(slice, i)
			}
		}
	}
	return slice
}
|
||||
Loading…
Add table
Add a link
Reference in a new issue