go.mod: update to images@v0.117.0

This commit updates to images v0.117.0 so that the cross-distro.sh
test works again: images removed fedora-39.json on main, but the
test uses the previous version of images that still includes
fedora-39, so there is a mismatch. (We should look into whether
there is a way to get github.com/osbuild/images@latest instead of
main in the cross-arch test.)

It also updates all the vendor code that got pulled in via the
new images release (which is ginormous).

This update also requires bumping the Go version to 1.22.8.
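(For reference: a bump like this is typically produced with the
standard Go module commands below. This is a sketch of the usual
sequence, not something recorded in the commit itself.)

    go get github.com/osbuild/images@v0.117.0
    go mod tidy
    go mod vendor

The Go version bump then appears as the "go 1.22.8" directive in
go.mod.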
Authored by Michael Vogt on 2025-02-14 11:08:48 +01:00; committed by Achilleas Koutsou.
parent 886ddc0bcc
commit 409b4f6048
584 changed files with 60776 additions and 50181 deletions


@ -514,7 +514,11 @@ func (c *Client) Accept(ctx context.Context, chal *Challenge) (*Challenge, error
return nil, err
}
res, err := c.post(ctx, nil, chal.URI, json.RawMessage("{}"), wantStatus(
payload := json.RawMessage("{}")
if len(chal.Payload) != 0 {
payload = chal.Payload
}
res, err := c.post(ctx, nil, chal.URI, payload, wantStatus(
http.StatusOK, // according to the spec
http.StatusAccepted, // Let's Encrypt: see https://goo.gl/WsJ7VT (acme-divergences.md)
))


@ -7,6 +7,7 @@ package acme
import (
"crypto"
"crypto/x509"
"encoding/json"
"errors"
"fmt"
"net/http"
@ -527,6 +528,16 @@ type Challenge struct {
// when this challenge was used.
// The type of a non-nil value is *Error.
Error error
// Payload is the JSON-formatted payload that the client sends
// to the server to indicate it is ready to respond to the challenge.
// When unset, it defaults to an empty JSON object: {}.
// For most challenges, the client must not set Payload,
// see https://tools.ietf.org/html/rfc8555#section-7.5.1.
// Payload is used only for newer challenges (such as "device-attest-01")
// where the client must send additional data for the server to validate
// the challenge.
Payload json.RawMessage
}
// wireChallenge is ACME JSON challenge representation.
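As an illustration of the new field, here is a minimal sketch of
answering a device-attest-01 challenge with a non-empty payload.
Accept and Challenge.Payload are the x/crypto/acme APIs shown in this
diff; the "attObj" payload shape follows the ACME device-attest-01
draft, and producing the attestation object itself is
platform-specific and elided here:

    package attest

    import (
        "context"
        "encoding/base64"
        "encoding/json"

        "golang.org/x/crypto/acme"
    )

    // acceptDeviceAttest answers a device-attest-01 challenge. attObj is a
    // CBOR-encoded attestation object; how it is produced is out of scope.
    func acceptDeviceAttest(ctx context.Context, client *acme.Client, chal *acme.Challenge, attObj []byte) (*acme.Challenge, error) {
        payload, err := json.Marshal(struct {
            AttObj string `json:"attObj"`
        }{AttObj: base64.RawURLEncoding.EncodeToString(attObj)})
        if err != nil {
            return nil, err
        }
        chal.Payload = payload // left unset, Accept would post the default {}
        return client.Accept(ctx, chal)
    }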


@ -1,50 +0,0 @@
// Copyright 2021 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package constraints defines a set of useful constraints to be used
// with type parameters.
package constraints
// Signed is a constraint that permits any signed integer type.
// If future releases of Go add new predeclared signed integer types,
// this constraint will be modified to include them.
type Signed interface {
~int | ~int8 | ~int16 | ~int32 | ~int64
}
// Unsigned is a constraint that permits any unsigned integer type.
// If future releases of Go add new predeclared unsigned integer types,
// this constraint will be modified to include them.
type Unsigned interface {
~uint | ~uint8 | ~uint16 | ~uint32 | ~uint64 | ~uintptr
}
// Integer is a constraint that permits any integer type.
// If future releases of Go add new predeclared integer types,
// this constraint will be modified to include them.
type Integer interface {
Signed | Unsigned
}
// Float is a constraint that permits any floating-point type.
// If future releases of Go add new predeclared floating-point types,
// this constraint will be modified to include them.
type Float interface {
~float32 | ~float64
}
// Complex is a constraint that permits any complex numeric type.
// If future releases of Go add new predeclared complex numeric types,
// this constraint will be modified to include them.
type Complex interface {
~complex64 | ~complex128
}
// Ordered is a constraint that permits any ordered type: any type
// that supports the operators < <= >= >.
// If future releases of Go add new ordered types,
// this constraint will be modified to include them.
type Ordered interface {
Integer | Float | ~string
}
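This file can be removed because, since Go 1.21, the standard
library's cmp package supplies the Ordered constraint that vendored
code previously imported from x/exp/constraints (and nothing in this
tree appears to need the other constraints anymore). A minimal sketch
of the replacement:

    package main

    import (
        "cmp"
        "fmt"
    )

    // maxOf accepts any ordered type via std cmp.Ordered, which covers the
    // same types as the removed constraints.Ordered.
    func maxOf[T cmp.Ordered](a, b T) T {
        if a > b {
            return a
        }
        return b
    }

    func main() {
        fmt.Println(maxOf(3, 7), maxOf("a", "b")) // 7 b
    }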

vendor/golang.org/x/exp/maps/maps.go (generated, vendored)

@ -5,9 +5,20 @@
// Package maps defines various functions useful with maps of any type.
package maps
import "maps"
// TODO(adonovan): when https://go.dev/issue/32816 is accepted, all of
// these functions except Keys and Values should be annotated
// (provisionally with "//go:fix inline") so that tools can safely and
// automatically replace calls to exp/maps with calls to std maps by
// inlining them.
// Keys returns the keys of the map m.
// The keys will be in an indeterminate order.
func Keys[M ~map[K]V, K comparable, V any](m M) []K {
// The simplest true equivalent using std is:
// return slices.AppendSeq(make([]K, 0, len(m)), maps.Keys(m)).
r := make([]K, 0, len(m))
for k := range m {
r = append(r, k)
@ -18,6 +29,9 @@ func Keys[M ~map[K]V, K comparable, V any](m M) []K {
// Values returns the values of the map m.
// The values will be in an indeterminate order.
func Values[M ~map[K]V, K comparable, V any](m M) []V {
// The simplest true equivalent using std is:
// return slices.AppendSeq(make([]V, 0, len(m)), maps.Values(m)).
r := make([]V, 0, len(m))
for _, v := range m {
r = append(r, v)
@ -28,50 +42,24 @@ func Values[M ~map[K]V, K comparable, V any](m M) []V {
// Equal reports whether two maps contain the same key/value pairs.
// Values are compared using ==.
func Equal[M1, M2 ~map[K]V, K, V comparable](m1 M1, m2 M2) bool {
if len(m1) != len(m2) {
return false
}
for k, v1 := range m1 {
if v2, ok := m2[k]; !ok || v1 != v2 {
return false
}
}
return true
return maps.Equal(m1, m2)
}
// EqualFunc is like Equal, but compares values using eq.
// Keys are still compared with ==.
func EqualFunc[M1 ~map[K]V1, M2 ~map[K]V2, K comparable, V1, V2 any](m1 M1, m2 M2, eq func(V1, V2) bool) bool {
if len(m1) != len(m2) {
return false
}
for k, v1 := range m1 {
if v2, ok := m2[k]; !ok || !eq(v1, v2) {
return false
}
}
return true
return maps.EqualFunc(m1, m2, eq)
}
// Clear removes all entries from m, leaving it empty.
func Clear[M ~map[K]V, K comparable, V any](m M) {
for k := range m {
delete(m, k)
}
clear(m)
}
// Clone returns a copy of m. This is a shallow clone:
// the new keys and values are set using ordinary assignment.
func Clone[M ~map[K]V, K comparable, V any](m M) M {
// Preserve nil in case it matters.
if m == nil {
return nil
}
r := make(M, len(m))
for k, v := range m {
r[k] = v
}
return r
return maps.Clone(m)
}
// Copy copies all key/value pairs in src adding them to dst.
@ -79,16 +67,10 @@ func Clone[M ~map[K]V, K comparable, V any](m M) M {
// the value in dst will be overwritten by the value associated
// with the key in src.
func Copy[M1 ~map[K]V, M2 ~map[K]V, K comparable, V any](dst M1, src M2) {
for k, v := range src {
dst[k] = v
}
maps.Copy(dst, src)
}
// DeleteFunc deletes any key/value pairs from m for which del returns true.
func DeleteFunc[M ~map[K]V, K comparable, V any](m M, del func(K, V) bool) {
for k, v := range m {
if del(k, v) {
delete(m, k)
}
}
maps.DeleteFunc(m, del)
}
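The new comments spell out the standard-library equivalents. A small
sketch, which needs Go 1.23+ for the iterator-based maps.Keys and
slices.AppendSeq:

    package main

    import (
        "fmt"
        "maps"
        "slices"
    )

    func main() {
        m := map[string]int{"a": 1, "b": 2, "c": 3}

        // The "simplest true equivalent" of exp/maps.Keys noted above.
        keys := slices.AppendSeq(make([]string, 0, len(m)), maps.Keys(m))
        slices.Sort(keys) // map iteration order is indeterminate
        fmt.Println(keys) // [a b c]

        // Equal, Clone, Copy and DeleteFunc now delegate directly to std maps.
        fmt.Println(maps.Equal(m, maps.Clone(m))) // true
    }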


@ -1,44 +0,0 @@
// Copyright 2023 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package slices
import "golang.org/x/exp/constraints"
// min is a version of the predeclared function from the Go 1.21 release.
func min[T constraints.Ordered](a, b T) T {
if a < b || isNaN(a) {
return a
}
return b
}
// max is a version of the predeclared function from the Go 1.21 release.
func max[T constraints.Ordered](a, b T) T {
if a > b || isNaN(a) {
return a
}
return b
}
// cmpLess is a copy of cmp.Less from the Go 1.21 release.
func cmpLess[T constraints.Ordered](x, y T) bool {
return (isNaN(x) && !isNaN(y)) || x < y
}
// cmpCompare is a copy of cmp.Compare from the Go 1.21 release.
func cmpCompare[T constraints.Ordered](x, y T) int {
xNaN := isNaN(x)
yNaN := isNaN(y)
if xNaN && yNaN {
return 0
}
if xNaN || x < y {
return -1
}
if yNaN || x > y {
return +1
}
return 0
}
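This shim can go away because everything in it landed in Go 1.21:
min and max are predeclared, and cmp.Less/cmp.Compare replace the
local copies, NaN ordering included. A quick demonstration:

    package main

    import (
        "cmp"
        "fmt"
        "math"
    )

    func main() {
        // min and max are predeclared since Go 1.21.
        fmt.Println(min(2, 5), max(2, 5)) // 2 5

        // Like cmpCompare above, cmp.Compare orders a NaN before any non-NaN.
        fmt.Println(cmp.Compare(math.NaN(), 1.0)) // -1
    }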


@ -6,26 +6,22 @@
package slices
import (
"unsafe"
"golang.org/x/exp/constraints"
"cmp"
"slices"
)
// TODO(adonovan): when https://go.dev/issue/32816 is accepted, all of
// these functions should be annotated (provisionally with "//go:fix
// inline") so that tools can safely and automatically replace calls
// to exp/slices with calls to std slices by inlining them.
// Equal reports whether two slices are equal: the same length and all
// elements equal. If the lengths are different, Equal returns false.
// Otherwise, the elements are compared in increasing index order, and the
// comparison stops at the first unequal pair.
// Floating point NaNs are not considered equal.
func Equal[S ~[]E, E comparable](s1, s2 S) bool {
if len(s1) != len(s2) {
return false
}
for i := range s1 {
if s1[i] != s2[i] {
return false
}
}
return true
return slices.Equal(s1, s2)
}
// EqualFunc reports whether two slices are equal using an equality
@ -34,16 +30,7 @@ func Equal[S ~[]E, E comparable](s1, s2 S) bool {
// increasing index order, and the comparison stops at the first index
// for which eq returns false.
func EqualFunc[S1 ~[]E1, S2 ~[]E2, E1, E2 any](s1 S1, s2 S2, eq func(E1, E2) bool) bool {
if len(s1) != len(s2) {
return false
}
for i, v1 := range s1 {
v2 := s2[i]
if !eq(v1, v2) {
return false
}
}
return true
return slices.EqualFunc(s1, s2, eq)
}
// Compare compares the elements of s1 and s2, using [cmp.Compare] on each pair
@ -53,20 +40,8 @@ func EqualFunc[S1 ~[]E1, S2 ~[]E2, E1, E2 any](s1 S1, s2 S2, eq func(E1, E2) boo
// If both slices are equal until one of them ends, the shorter slice is
// considered less than the longer one.
// The result is 0 if s1 == s2, -1 if s1 < s2, and +1 if s1 > s2.
func Compare[S ~[]E, E constraints.Ordered](s1, s2 S) int {
for i, v1 := range s1 {
if i >= len(s2) {
return +1
}
v2 := s2[i]
if c := cmpCompare(v1, v2); c != 0 {
return c
}
}
if len(s1) < len(s2) {
return -1
}
return 0
func Compare[S ~[]E, E cmp.Ordered](s1, s2 S) int {
return slices.Compare(s1, s2)
}
// CompareFunc is like [Compare] but uses a custom comparison function on each
@ -75,52 +50,30 @@ func Compare[S ~[]E, E constraints.Ordered](s1, s2 S) int {
// returns 0 the result is 0 if len(s1) == len(s2), -1 if len(s1) < len(s2),
// and +1 if len(s1) > len(s2).
func CompareFunc[S1 ~[]E1, S2 ~[]E2, E1, E2 any](s1 S1, s2 S2, cmp func(E1, E2) int) int {
for i, v1 := range s1 {
if i >= len(s2) {
return +1
}
v2 := s2[i]
if c := cmp(v1, v2); c != 0 {
return c
}
}
if len(s1) < len(s2) {
return -1
}
return 0
return slices.CompareFunc(s1, s2, cmp)
}
// Index returns the index of the first occurrence of v in s,
// or -1 if not present.
func Index[S ~[]E, E comparable](s S, v E) int {
for i := range s {
if v == s[i] {
return i
}
}
return -1
return slices.Index(s, v)
}
// IndexFunc returns the first index i satisfying f(s[i]),
// or -1 if none do.
func IndexFunc[S ~[]E, E any](s S, f func(E) bool) int {
for i := range s {
if f(s[i]) {
return i
}
}
return -1
return slices.IndexFunc(s, f)
}
// Contains reports whether v is present in s.
func Contains[S ~[]E, E comparable](s S, v E) bool {
return Index(s, v) >= 0
return slices.Contains(s, v)
}
// ContainsFunc reports whether at least one
// element e of s satisfies f(e).
func ContainsFunc[S ~[]E, E any](s S, f func(E) bool) bool {
return IndexFunc(s, f) >= 0
return slices.ContainsFunc(s, f)
}
// Insert inserts the values v... into s at index i,
@ -131,92 +84,7 @@ func ContainsFunc[S ~[]E, E any](s S, f func(E) bool) bool {
// Insert panics if i is out of range.
// This function is O(len(s) + len(v)).
func Insert[S ~[]E, E any](s S, i int, v ...E) S {
m := len(v)
if m == 0 {
return s
}
n := len(s)
if i == n {
return append(s, v...)
}
if n+m > cap(s) {
// Use append rather than make so that we bump the size of
// the slice up to the next storage class.
// This is what Grow does but we don't call Grow because
// that might copy the values twice.
s2 := append(s[:i], make(S, n+m-i)...)
copy(s2[i:], v)
copy(s2[i+m:], s[i:])
return s2
}
s = s[:n+m]
// before:
// s: aaaaaaaabbbbccccccccdddd
// ^ ^ ^ ^
// i i+m n n+m
// after:
// s: aaaaaaaavvvvbbbbcccccccc
// ^ ^ ^ ^
// i i+m n n+m
//
// a are the values that don't move in s.
// v are the values copied in from v.
// b and c are the values from s that are shifted up in index.
// d are the values that get overwritten, never to be seen again.
if !overlaps(v, s[i+m:]) {
// Easy case - v does not overlap either the c or d regions.
// (It might be in some of a or b, or elsewhere entirely.)
// The data we copy up doesn't write to v at all, so just do it.
copy(s[i+m:], s[i:])
// Now we have
// s: aaaaaaaabbbbbbbbcccccccc
// ^ ^ ^ ^
// i i+m n n+m
// Note the b values are duplicated.
copy(s[i:], v)
// Now we have
// s: aaaaaaaavvvvbbbbcccccccc
// ^ ^ ^ ^
// i i+m n n+m
// That's the result we want.
return s
}
// The hard case - v overlaps c or d. We can't just shift up
// the data because we'd move or clobber the values we're trying
// to insert.
// So instead, write v on top of d, then rotate.
copy(s[n:], v)
// Now we have
// s: aaaaaaaabbbbccccccccvvvv
// ^ ^ ^ ^
// i i+m n n+m
rotateRight(s[i:], m)
// Now we have
// s: aaaaaaaavvvvbbbbcccccccc
// ^ ^ ^ ^
// i i+m n n+m
// That's the result we want.
return s
}
// clearSlice sets all elements up to the length of s to the zero value of E.
// We may use the builtin clear func instead, and remove clearSlice, when upgrading
// to Go 1.21+.
func clearSlice[S ~[]E, E any](s S) {
var zero E
for i := range s {
s[i] = zero
}
return slices.Insert(s, i, v...)
}
// Delete removes the elements s[i:j] from s, returning the modified slice.
@ -225,135 +93,27 @@ func clearSlice[S ~[]E, E any](s S) {
// make a single call deleting them all together than to delete one at a time.
// Delete zeroes the elements s[len(s)-(j-i):len(s)].
func Delete[S ~[]E, E any](s S, i, j int) S {
_ = s[i:j:len(s)] // bounds check
if i == j {
return s
}
oldlen := len(s)
s = append(s[:i], s[j:]...)
clearSlice(s[len(s):oldlen]) // zero/nil out the obsolete elements, for GC
return s
return slices.Delete(s, i, j)
}
// DeleteFunc removes any elements from s for which del returns true,
// returning the modified slice.
// DeleteFunc zeroes the elements between the new length and the original length.
func DeleteFunc[S ~[]E, E any](s S, del func(E) bool) S {
i := IndexFunc(s, del)
if i == -1 {
return s
}
// Don't start copying elements until we find one to delete.
for j := i + 1; j < len(s); j++ {
if v := s[j]; !del(v) {
s[i] = v
i++
}
}
clearSlice(s[i:]) // zero/nil out the obsolete elements, for GC
return s[:i]
return slices.DeleteFunc(s, del)
}
// Replace replaces the elements s[i:j] by the given v, and returns the
// modified slice. Replace panics if s[i:j] is not a valid slice of s.
// When len(v) < (j-i), Replace zeroes the elements between the new length and the original length.
func Replace[S ~[]E, E any](s S, i, j int, v ...E) S {
_ = s[i:j] // verify that i:j is a valid subslice
if i == j {
return Insert(s, i, v...)
}
if j == len(s) {
return append(s[:i], v...)
}
tot := len(s[:i]) + len(v) + len(s[j:])
if tot > cap(s) {
// Too big to fit, allocate and copy over.
s2 := append(s[:i], make(S, tot-i)...) // See Insert
copy(s2[i:], v)
copy(s2[i+len(v):], s[j:])
return s2
}
r := s[:tot]
if i+len(v) <= j {
// Easy, as v fits in the deleted portion.
copy(r[i:], v)
if i+len(v) != j {
copy(r[i+len(v):], s[j:])
}
clearSlice(s[tot:]) // zero/nil out the obsolete elements, for GC
return r
}
// We are expanding (v is bigger than j-i).
// The situation is something like this:
// (example has i=4,j=8,len(s)=16,len(v)=6)
// s: aaaaxxxxbbbbbbbbyy
// ^ ^ ^ ^
// i j len(s) tot
// a: prefix of s
// x: deleted range
// b: more of s
// y: area to expand into
if !overlaps(r[i+len(v):], v) {
// Easy, as v is not clobbered by the first copy.
copy(r[i+len(v):], s[j:])
copy(r[i:], v)
return r
}
// This is a situation where we don't have a single place to which
// we can copy v. Parts of it need to go to two different places.
// We want to copy the prefix of v into y and the suffix into x, then
// rotate |y| spots to the right.
//
// v[2:] v[:2]
// | |
// s: aaaavvvvbbbbbbbbvv
// ^ ^ ^ ^
// i j len(s) tot
//
// If either of those two destinations don't alias v, then we're good.
y := len(v) - (j - i) // length of y portion
if !overlaps(r[i:j], v) {
copy(r[i:j], v[y:])
copy(r[len(s):], v[:y])
rotateRight(r[i:], y)
return r
}
if !overlaps(r[len(s):], v) {
copy(r[len(s):], v[:y])
copy(r[i:j], v[y:])
rotateRight(r[i:], y)
return r
}
// Now we know that v overlaps both x and y.
// That means that the entirety of b is *inside* v.
// So we don't need to preserve b at all; instead we
// can copy v first, then copy the b part of v out of
// v to the right destination.
k := startIdx(v, s[j:])
copy(r[i:], v)
copy(r[i+len(v):], r[i+k:])
return r
return slices.Replace(s, i, j, v...)
}
// Clone returns a copy of the slice.
// The elements are copied using assignment, so this is a shallow clone.
func Clone[S ~[]E, E any](s S) S {
// Preserve nil in case it matters.
if s == nil {
return nil
}
return append(S([]E{}), s...)
return slices.Clone(s)
}
// Compact replaces consecutive runs of equal elements with a single copy.
@ -362,40 +122,14 @@ func Clone[S ~[]E, E any](s S) S {
// which may have a smaller length.
// Compact zeroes the elements between the new length and the original length.
func Compact[S ~[]E, E comparable](s S) S {
if len(s) < 2 {
return s
}
i := 1
for k := 1; k < len(s); k++ {
if s[k] != s[k-1] {
if i != k {
s[i] = s[k]
}
i++
}
}
clearSlice(s[i:]) // zero/nil out the obsolete elements, for GC
return s[:i]
return slices.Compact(s)
}
// CompactFunc is like [Compact] but uses an equality function to compare elements.
// For runs of elements that compare equal, CompactFunc keeps the first one.
// CompactFunc zeroes the elements between the new length and the original length.
func CompactFunc[S ~[]E, E any](s S, eq func(E, E) bool) S {
if len(s) < 2 {
return s
}
i := 1
for k := 1; k < len(s); k++ {
if !eq(s[k], s[k-1]) {
if i != k {
s[i] = s[k]
}
i++
}
}
clearSlice(s[i:]) // zero/nil out the obsolete elements, for GC
return s[:i]
return slices.CompactFunc(s, eq)
}
// Grow increases the slice's capacity, if necessary, to guarantee space for
@ -403,113 +137,15 @@ func CompactFunc[S ~[]E, E any](s S, eq func(E, E) bool) S {
// to the slice without another allocation. If n is negative or too large to
// allocate the memory, Grow panics.
func Grow[S ~[]E, E any](s S, n int) S {
if n < 0 {
panic("cannot be negative")
}
if n -= cap(s) - len(s); n > 0 {
// TODO(https://go.dev/issue/53888): Make using []E instead of S
// to workaround a compiler bug where the runtime.growslice optimization
// does not take effect. Revert when the compiler is fixed.
s = append([]E(s)[:cap(s)], make([]E, n)...)[:len(s)]
}
return s
return slices.Grow(s, n)
}
// Clip removes unused capacity from the slice, returning s[:len(s):len(s)].
func Clip[S ~[]E, E any](s S) S {
return s[:len(s):len(s)]
}
// Rotation algorithm explanation:
//
// rotate left by 2
// start with
// 0123456789
// split up like this
// 01 234567 89
// swap first 2 and last 2
// 89 234567 01
// join first parts
// 89234567 01
// recursively rotate first left part by 2
// 23456789 01
// join at the end
// 2345678901
//
// rotate left by 8
// start with
// 0123456789
// split up like this
// 01 234567 89
// swap first 2 and last 2
// 89 234567 01
// join last parts
// 89 23456701
// recursively rotate second part left by 6
// 89 01234567
// join at the end
// 8901234567
// TODO: There are other rotate algorithms.
// This algorithm has the desirable property that it moves each element exactly twice.
// The triple-reverse algorithm is simpler and more cache friendly, but takes more writes.
// The follow-cycles algorithm can be 1-write but it is not very cache friendly.
// rotateLeft rotates b left by n spaces.
// s_final[i] = s_orig[i+r], wrapping around.
func rotateLeft[E any](s []E, r int) {
for r != 0 && r != len(s) {
if r*2 <= len(s) {
swap(s[:r], s[len(s)-r:])
s = s[:len(s)-r]
} else {
swap(s[:len(s)-r], s[r:])
s, r = s[len(s)-r:], r*2-len(s)
}
}
}
func rotateRight[E any](s []E, r int) {
rotateLeft(s, len(s)-r)
}
// swap swaps the contents of x and y. x and y must be equal length and disjoint.
func swap[E any](x, y []E) {
for i := 0; i < len(x); i++ {
x[i], y[i] = y[i], x[i]
}
}
// overlaps reports whether the memory ranges a[0:len(a)] and b[0:len(b)] overlap.
func overlaps[E any](a, b []E) bool {
if len(a) == 0 || len(b) == 0 {
return false
}
elemSize := unsafe.Sizeof(a[0])
if elemSize == 0 {
return false
}
// TODO: use a runtime/unsafe facility once one becomes available. See issue 12445.
// Also see crypto/internal/alias/alias.go:AnyOverlap
return uintptr(unsafe.Pointer(&a[0])) <= uintptr(unsafe.Pointer(&b[len(b)-1]))+(elemSize-1) &&
uintptr(unsafe.Pointer(&b[0])) <= uintptr(unsafe.Pointer(&a[len(a)-1]))+(elemSize-1)
}
// startIdx returns the index in haystack where the needle starts.
// prerequisite: the needle must be aliased entirely inside the haystack.
func startIdx[E any](haystack, needle []E) int {
p := &needle[0]
for i := range haystack {
if p == &haystack[i] {
return i
}
}
// TODO: what if the overlap is by a non-integral number of Es?
panic("needle not found")
return slices.Clip(s)
}
// Reverse reverses the elements of the slice in place.
func Reverse[S ~[]E, E any](s S) {
for i, j := 0, len(s)-1; i < j; i, j = i+1, j-1 {
s[i], s[j] = s[j], s[i]
}
slices.Reverse(s)
}
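With every function above reduced to a thin wrapper, importing
x/exp/slices now behaves identically to using the standard slices
package directly (Go 1.21+), for instance:

    package main

    import (
        "fmt"
        "slices"
    )

    func main() {
        s := []int{1, 2, 2, 3}
        s = slices.Compact(s)      // [1 2 3]
        s = slices.Insert(s, 1, 9) // [1 9 2 3]
        s = slices.Delete(s, 2, 3) // [1 9 3]
        fmt.Println(s, slices.Contains(s, 9)) // [1 9 3] true
    }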


@ -2,21 +2,20 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:generate go run $GOROOT/src/sort/gen_sort_variants.go -exp
package slices
import (
"math/bits"
"golang.org/x/exp/constraints"
"cmp"
"slices"
)
// TODO(adonovan): add a "//go:fix inline" annotation to each function
// in this file; see https://go.dev/issue/32816.
// Sort sorts a slice of any ordered type in ascending order.
// When sorting floating-point numbers, NaNs are ordered before other values.
func Sort[S ~[]E, E constraints.Ordered](x S) {
n := len(x)
pdqsortOrdered(x, 0, n, bits.Len(uint(n)))
func Sort[S ~[]E, E cmp.Ordered](x S) {
slices.Sort(x)
}
// SortFunc sorts the slice x in ascending order as determined by the cmp
@ -29,118 +28,60 @@ func Sort[S ~[]E, E constraints.Ordered](x S) {
// See https://en.wikipedia.org/wiki/Weak_ordering#Strict_weak_orderings.
// To indicate 'uncomparable', return 0 from the function.
func SortFunc[S ~[]E, E any](x S, cmp func(a, b E) int) {
n := len(x)
pdqsortCmpFunc(x, 0, n, bits.Len(uint(n)), cmp)
slices.SortFunc(x, cmp)
}
// SortStableFunc sorts the slice x while keeping the original order of equal
// elements, using cmp to compare elements in the same way as [SortFunc].
func SortStableFunc[S ~[]E, E any](x S, cmp func(a, b E) int) {
stableCmpFunc(x, len(x), cmp)
slices.SortStableFunc(x, cmp)
}
// IsSorted reports whether x is sorted in ascending order.
func IsSorted[S ~[]E, E constraints.Ordered](x S) bool {
for i := len(x) - 1; i > 0; i-- {
if cmpLess(x[i], x[i-1]) {
return false
}
}
return true
func IsSorted[S ~[]E, E cmp.Ordered](x S) bool {
return slices.IsSorted(x)
}
// IsSortedFunc reports whether x is sorted in ascending order, with cmp as the
// comparison function as defined by [SortFunc].
func IsSortedFunc[S ~[]E, E any](x S, cmp func(a, b E) int) bool {
for i := len(x) - 1; i > 0; i-- {
if cmp(x[i], x[i-1]) < 0 {
return false
}
}
return true
return slices.IsSortedFunc(x, cmp)
}
// Min returns the minimal value in x. It panics if x is empty.
// For floating-point numbers, Min propagates NaNs (any NaN value in x
// forces the output to be NaN).
func Min[S ~[]E, E constraints.Ordered](x S) E {
if len(x) < 1 {
panic("slices.Min: empty list")
}
m := x[0]
for i := 1; i < len(x); i++ {
m = min(m, x[i])
}
return m
func Min[S ~[]E, E cmp.Ordered](x S) E {
return slices.Min(x)
}
// MinFunc returns the minimal value in x, using cmp to compare elements.
// It panics if x is empty. If there is more than one minimal element
// according to the cmp function, MinFunc returns the first one.
func MinFunc[S ~[]E, E any](x S, cmp func(a, b E) int) E {
if len(x) < 1 {
panic("slices.MinFunc: empty list")
}
m := x[0]
for i := 1; i < len(x); i++ {
if cmp(x[i], m) < 0 {
m = x[i]
}
}
return m
return slices.MinFunc(x, cmp)
}
// Max returns the maximal value in x. It panics if x is empty.
// For floating-point E, Max propagates NaNs (any NaN value in x
// forces the output to be NaN).
func Max[S ~[]E, E constraints.Ordered](x S) E {
if len(x) < 1 {
panic("slices.Max: empty list")
}
m := x[0]
for i := 1; i < len(x); i++ {
m = max(m, x[i])
}
return m
func Max[S ~[]E, E cmp.Ordered](x S) E {
return slices.Max(x)
}
// MaxFunc returns the maximal value in x, using cmp to compare elements.
// It panics if x is empty. If there is more than one maximal element
// according to the cmp function, MaxFunc returns the first one.
func MaxFunc[S ~[]E, E any](x S, cmp func(a, b E) int) E {
if len(x) < 1 {
panic("slices.MaxFunc: empty list")
}
m := x[0]
for i := 1; i < len(x); i++ {
if cmp(x[i], m) > 0 {
m = x[i]
}
}
return m
return slices.MaxFunc(x, cmp)
}
// BinarySearch searches for target in a sorted slice and returns the position
// where target is found, or the position where target would appear in the
// sort order; it also returns a bool saying whether the target is really found
// in the slice. The slice must be sorted in increasing order.
func BinarySearch[S ~[]E, E constraints.Ordered](x S, target E) (int, bool) {
// Inlining is faster than calling BinarySearchFunc with a lambda.
n := len(x)
// Define x[-1] < target and x[n] >= target.
// Invariant: x[i-1] < target, x[j] >= target.
i, j := 0, n
for i < j {
h := int(uint(i+j) >> 1) // avoid overflow when computing h
// i ≤ h < j
if cmpLess(x[h], target) {
i = h + 1 // preserves x[i-1] < target
} else {
j = h // preserves x[j] >= target
}
}
// i == j, x[i-1] < target, and x[j] (= x[i]) >= target => answer is i.
return i, i < n && (x[i] == target || (isNaN(x[i]) && isNaN(target)))
func BinarySearch[S ~[]E, E cmp.Ordered](x S, target E) (int, bool) {
return slices.BinarySearch(x, target)
}
// BinarySearchFunc works like [BinarySearch], but uses a custom comparison
@ -151,47 +92,5 @@ func BinarySearch[S ~[]E, E constraints.Ordered](x S, target E) (int, bool) {
// cmp must implement the same ordering as the slice, such that if
// cmp(a, t) < 0 and cmp(b, t) >= 0, then a must precede b in the slice.
func BinarySearchFunc[S ~[]E, E, T any](x S, target T, cmp func(E, T) int) (int, bool) {
n := len(x)
// Define cmp(x[-1], target) < 0 and cmp(x[n], target) >= 0 .
// Invariant: cmp(x[i - 1], target) < 0, cmp(x[j], target) >= 0.
i, j := 0, n
for i < j {
h := int(uint(i+j) >> 1) // avoid overflow when computing h
// i ≤ h < j
if cmp(x[h], target) < 0 {
i = h + 1 // preserves cmp(x[i - 1], target) < 0
} else {
j = h // preserves cmp(x[j], target) >= 0
}
}
// i == j, cmp(x[i-1], target) < 0, and cmp(x[j], target) (= cmp(x[i], target)) >= 0 => answer is i.
return i, i < n && cmp(x[i], target) == 0
}
type sortedHint int // hint for pdqsort when choosing the pivot
const (
unknownHint sortedHint = iota
increasingHint
decreasingHint
)
// xorshift paper: https://www.jstatsoft.org/article/view/v008i14/xorshift.pdf
type xorshift uint64
func (r *xorshift) Next() uint64 {
*r ^= *r << 13
*r ^= *r >> 17
*r ^= *r << 5
return uint64(*r)
}
func nextPowerOfTwo(length int) uint {
return 1 << bits.Len(uint(length))
}
// isNaN reports whether x is a NaN without requiring the math package.
// This will always return false if T is not floating-point.
func isNaN[T constraints.Ordered](x T) bool {
return x != x
return slices.BinarySearchFunc(x, target, cmp)
}
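The same applies to the sorting and searching wrappers. A std-only
demonstration of the delegated behavior:

    package main

    import (
        "cmp"
        "fmt"
        "slices"
    )

    func main() {
        xs := []int{3, 1, 2}
        slices.SortFunc(xs, cmp.Compare) // same contract as SortFunc above
        i, ok := slices.BinarySearch(xs, 2)
        fmt.Println(xs, i, ok) // [1 2 3] 1 true
    }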


@ -1,479 +0,0 @@
// Code generated by gen_sort_variants.go; DO NOT EDIT.
// Copyright 2022 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package slices
// insertionSortCmpFunc sorts data[a:b] using insertion sort.
func insertionSortCmpFunc[E any](data []E, a, b int, cmp func(a, b E) int) {
for i := a + 1; i < b; i++ {
for j := i; j > a && (cmp(data[j], data[j-1]) < 0); j-- {
data[j], data[j-1] = data[j-1], data[j]
}
}
}
// siftDownCmpFunc implements the heap property on data[lo:hi].
// first is an offset into the array where the root of the heap lies.
func siftDownCmpFunc[E any](data []E, lo, hi, first int, cmp func(a, b E) int) {
root := lo
for {
child := 2*root + 1
if child >= hi {
break
}
if child+1 < hi && (cmp(data[first+child], data[first+child+1]) < 0) {
child++
}
if !(cmp(data[first+root], data[first+child]) < 0) {
return
}
data[first+root], data[first+child] = data[first+child], data[first+root]
root = child
}
}
func heapSortCmpFunc[E any](data []E, a, b int, cmp func(a, b E) int) {
first := a
lo := 0
hi := b - a
// Build heap with greatest element at top.
for i := (hi - 1) / 2; i >= 0; i-- {
siftDownCmpFunc(data, i, hi, first, cmp)
}
// Pop elements, largest first, into end of data.
for i := hi - 1; i >= 0; i-- {
data[first], data[first+i] = data[first+i], data[first]
siftDownCmpFunc(data, lo, i, first, cmp)
}
}
// pdqsortCmpFunc sorts data[a:b].
// The algorithm based on pattern-defeating quicksort(pdqsort), but without the optimizations from BlockQuicksort.
// pdqsort paper: https://arxiv.org/pdf/2106.05123.pdf
// C++ implementation: https://github.com/orlp/pdqsort
// Rust implementation: https://docs.rs/pdqsort/latest/pdqsort/
// limit is the number of allowed bad (very unbalanced) pivots before falling back to heapsort.
func pdqsortCmpFunc[E any](data []E, a, b, limit int, cmp func(a, b E) int) {
const maxInsertion = 12
var (
wasBalanced = true // whether the last partitioning was reasonably balanced
wasPartitioned = true // whether the slice was already partitioned
)
for {
length := b - a
if length <= maxInsertion {
insertionSortCmpFunc(data, a, b, cmp)
return
}
// Fall back to heapsort if too many bad choices were made.
if limit == 0 {
heapSortCmpFunc(data, a, b, cmp)
return
}
// If the last partitioning was imbalanced, we need to breaking patterns.
if !wasBalanced {
breakPatternsCmpFunc(data, a, b, cmp)
limit--
}
pivot, hint := choosePivotCmpFunc(data, a, b, cmp)
if hint == decreasingHint {
reverseRangeCmpFunc(data, a, b, cmp)
// The chosen pivot was pivot-a elements after the start of the array.
// After reversing it is pivot-a elements before the end of the array.
// The idea came from Rust's implementation.
pivot = (b - 1) - (pivot - a)
hint = increasingHint
}
// The slice is likely already sorted.
if wasBalanced && wasPartitioned && hint == increasingHint {
if partialInsertionSortCmpFunc(data, a, b, cmp) {
return
}
}
// Probably the slice contains many duplicate elements, partition the slice into
// elements equal to and elements greater than the pivot.
if a > 0 && !(cmp(data[a-1], data[pivot]) < 0) {
mid := partitionEqualCmpFunc(data, a, b, pivot, cmp)
a = mid
continue
}
mid, alreadyPartitioned := partitionCmpFunc(data, a, b, pivot, cmp)
wasPartitioned = alreadyPartitioned
leftLen, rightLen := mid-a, b-mid
balanceThreshold := length / 8
if leftLen < rightLen {
wasBalanced = leftLen >= balanceThreshold
pdqsortCmpFunc(data, a, mid, limit, cmp)
a = mid + 1
} else {
wasBalanced = rightLen >= balanceThreshold
pdqsortCmpFunc(data, mid+1, b, limit, cmp)
b = mid
}
}
}
// partitionCmpFunc does one quicksort partition.
// Let p = data[pivot]
// Moves elements in data[a:b] around, so that data[i]<p and data[j]>=p for i<newpivot and j>newpivot.
// On return, data[newpivot] = p
func partitionCmpFunc[E any](data []E, a, b, pivot int, cmp func(a, b E) int) (newpivot int, alreadyPartitioned bool) {
data[a], data[pivot] = data[pivot], data[a]
i, j := a+1, b-1 // i and j are inclusive of the elements remaining to be partitioned
for i <= j && (cmp(data[i], data[a]) < 0) {
i++
}
for i <= j && !(cmp(data[j], data[a]) < 0) {
j--
}
if i > j {
data[j], data[a] = data[a], data[j]
return j, true
}
data[i], data[j] = data[j], data[i]
i++
j--
for {
for i <= j && (cmp(data[i], data[a]) < 0) {
i++
}
for i <= j && !(cmp(data[j], data[a]) < 0) {
j--
}
if i > j {
break
}
data[i], data[j] = data[j], data[i]
i++
j--
}
data[j], data[a] = data[a], data[j]
return j, false
}
// partitionEqualCmpFunc partitions data[a:b] into elements equal to data[pivot] followed by elements greater than data[pivot].
// It assumed that data[a:b] does not contain elements smaller than the data[pivot].
func partitionEqualCmpFunc[E any](data []E, a, b, pivot int, cmp func(a, b E) int) (newpivot int) {
data[a], data[pivot] = data[pivot], data[a]
i, j := a+1, b-1 // i and j are inclusive of the elements remaining to be partitioned
for {
for i <= j && !(cmp(data[a], data[i]) < 0) {
i++
}
for i <= j && (cmp(data[a], data[j]) < 0) {
j--
}
if i > j {
break
}
data[i], data[j] = data[j], data[i]
i++
j--
}
return i
}
// partialInsertionSortCmpFunc partially sorts a slice, returns true if the slice is sorted at the end.
func partialInsertionSortCmpFunc[E any](data []E, a, b int, cmp func(a, b E) int) bool {
const (
maxSteps = 5 // maximum number of adjacent out-of-order pairs that will get shifted
shortestShifting = 50 // don't shift any elements on short arrays
)
i := a + 1
for j := 0; j < maxSteps; j++ {
for i < b && !(cmp(data[i], data[i-1]) < 0) {
i++
}
if i == b {
return true
}
if b-a < shortestShifting {
return false
}
data[i], data[i-1] = data[i-1], data[i]
// Shift the smaller one to the left.
if i-a >= 2 {
for j := i - 1; j >= 1; j-- {
if !(cmp(data[j], data[j-1]) < 0) {
break
}
data[j], data[j-1] = data[j-1], data[j]
}
}
// Shift the greater one to the right.
if b-i >= 2 {
for j := i + 1; j < b; j++ {
if !(cmp(data[j], data[j-1]) < 0) {
break
}
data[j], data[j-1] = data[j-1], data[j]
}
}
}
return false
}
// breakPatternsCmpFunc scatters some elements around in an attempt to break some patterns
// that might cause imbalanced partitions in quicksort.
func breakPatternsCmpFunc[E any](data []E, a, b int, cmp func(a, b E) int) {
length := b - a
if length >= 8 {
random := xorshift(length)
modulus := nextPowerOfTwo(length)
for idx := a + (length/4)*2 - 1; idx <= a+(length/4)*2+1; idx++ {
other := int(uint(random.Next()) & (modulus - 1))
if other >= length {
other -= length
}
data[idx], data[a+other] = data[a+other], data[idx]
}
}
}
// choosePivotCmpFunc chooses a pivot in data[a:b].
//
// [0,8): chooses a static pivot.
// [8,shortestNinther): uses the simple median-of-three method.
// [shortestNinther,∞): uses the Tukey ninther method.
func choosePivotCmpFunc[E any](data []E, a, b int, cmp func(a, b E) int) (pivot int, hint sortedHint) {
const (
shortestNinther = 50
maxSwaps = 4 * 3
)
l := b - a
var (
swaps int
i = a + l/4*1
j = a + l/4*2
k = a + l/4*3
)
if l >= 8 {
if l >= shortestNinther {
// Tukey ninther method, the idea came from Rust's implementation.
i = medianAdjacentCmpFunc(data, i, &swaps, cmp)
j = medianAdjacentCmpFunc(data, j, &swaps, cmp)
k = medianAdjacentCmpFunc(data, k, &swaps, cmp)
}
// Find the median among i, j, k and stores it into j.
j = medianCmpFunc(data, i, j, k, &swaps, cmp)
}
switch swaps {
case 0:
return j, increasingHint
case maxSwaps:
return j, decreasingHint
default:
return j, unknownHint
}
}
// order2CmpFunc returns x,y where data[x] <= data[y], where x,y=a,b or x,y=b,a.
func order2CmpFunc[E any](data []E, a, b int, swaps *int, cmp func(a, b E) int) (int, int) {
if cmp(data[b], data[a]) < 0 {
*swaps++
return b, a
}
return a, b
}
// medianCmpFunc returns x where data[x] is the median of data[a],data[b],data[c], where x is a, b, or c.
func medianCmpFunc[E any](data []E, a, b, c int, swaps *int, cmp func(a, b E) int) int {
a, b = order2CmpFunc(data, a, b, swaps, cmp)
b, c = order2CmpFunc(data, b, c, swaps, cmp)
a, b = order2CmpFunc(data, a, b, swaps, cmp)
return b
}
// medianAdjacentCmpFunc finds the median of data[a - 1], data[a], data[a + 1] and stores the index into a.
func medianAdjacentCmpFunc[E any](data []E, a int, swaps *int, cmp func(a, b E) int) int {
return medianCmpFunc(data, a-1, a, a+1, swaps, cmp)
}
func reverseRangeCmpFunc[E any](data []E, a, b int, cmp func(a, b E) int) {
i := a
j := b - 1
for i < j {
data[i], data[j] = data[j], data[i]
i++
j--
}
}
func swapRangeCmpFunc[E any](data []E, a, b, n int, cmp func(a, b E) int) {
for i := 0; i < n; i++ {
data[a+i], data[b+i] = data[b+i], data[a+i]
}
}
func stableCmpFunc[E any](data []E, n int, cmp func(a, b E) int) {
blockSize := 20 // must be > 0
a, b := 0, blockSize
for b <= n {
insertionSortCmpFunc(data, a, b, cmp)
a = b
b += blockSize
}
insertionSortCmpFunc(data, a, n, cmp)
for blockSize < n {
a, b = 0, 2*blockSize
for b <= n {
symMergeCmpFunc(data, a, a+blockSize, b, cmp)
a = b
b += 2 * blockSize
}
if m := a + blockSize; m < n {
symMergeCmpFunc(data, a, m, n, cmp)
}
blockSize *= 2
}
}
// symMergeCmpFunc merges the two sorted subsequences data[a:m] and data[m:b] using
// the SymMerge algorithm from Pok-Son Kim and Arne Kutzner, "Stable Minimum
// Storage Merging by Symmetric Comparisons", in Susanne Albers and Tomasz
// Radzik, editors, Algorithms - ESA 2004, volume 3221 of Lecture Notes in
// Computer Science, pages 714-723. Springer, 2004.
//
// Let M = m-a and N = b-n. Wolog M < N.
// The recursion depth is bound by ceil(log(N+M)).
// The algorithm needs O(M*log(N/M + 1)) calls to data.Less.
// The algorithm needs O((M+N)*log(M)) calls to data.Swap.
//
// The paper gives O((M+N)*log(M)) as the number of assignments assuming a
// rotation algorithm which uses O(M+N+gcd(M+N)) assignments. The argumentation
// in the paper carries through for Swap operations, especially as the block
// swapping rotate uses only O(M+N) Swaps.
//
// symMerge assumes non-degenerate arguments: a < m && m < b.
// Having the caller check this condition eliminates many leaf recursion calls,
// which improves performance.
func symMergeCmpFunc[E any](data []E, a, m, b int, cmp func(a, b E) int) {
// Avoid unnecessary recursions of symMerge
// by direct insertion of data[a] into data[m:b]
// if data[a:m] only contains one element.
if m-a == 1 {
// Use binary search to find the lowest index i
// such that data[i] >= data[a] for m <= i < b.
// Exit the search loop with i == b in case no such index exists.
i := m
j := b
for i < j {
h := int(uint(i+j) >> 1)
if cmp(data[h], data[a]) < 0 {
i = h + 1
} else {
j = h
}
}
// Swap values until data[a] reaches the position before i.
for k := a; k < i-1; k++ {
data[k], data[k+1] = data[k+1], data[k]
}
return
}
// Avoid unnecessary recursions of symMerge
// by direct insertion of data[m] into data[a:m]
// if data[m:b] only contains one element.
if b-m == 1 {
// Use binary search to find the lowest index i
// such that data[i] > data[m] for a <= i < m.
// Exit the search loop with i == m in case no such index exists.
i := a
j := m
for i < j {
h := int(uint(i+j) >> 1)
if !(cmp(data[m], data[h]) < 0) {
i = h + 1
} else {
j = h
}
}
// Swap values until data[m] reaches the position i.
for k := m; k > i; k-- {
data[k], data[k-1] = data[k-1], data[k]
}
return
}
mid := int(uint(a+b) >> 1)
n := mid + m
var start, r int
if m > mid {
start = n - b
r = mid
} else {
start = a
r = m
}
p := n - 1
for start < r {
c := int(uint(start+r) >> 1)
if !(cmp(data[p-c], data[c]) < 0) {
start = c + 1
} else {
r = c
}
}
end := n - start
if start < m && m < end {
rotateCmpFunc(data, start, m, end, cmp)
}
if a < start && start < mid {
symMergeCmpFunc(data, a, start, mid, cmp)
}
if mid < end && end < b {
symMergeCmpFunc(data, mid, end, b, cmp)
}
}
// rotateCmpFunc rotates two consecutive blocks u = data[a:m] and v = data[m:b] in data:
// Data of the form 'x u v y' is changed to 'x v u y'.
// rotate performs at most b-a many calls to data.Swap,
// and it assumes non-degenerate arguments: a < m && m < b.
func rotateCmpFunc[E any](data []E, a, m, b int, cmp func(a, b E) int) {
i := m - a
j := b - m
for i != j {
if i > j {
swapRangeCmpFunc(data, m-i, m, j, cmp)
i -= j
} else {
swapRangeCmpFunc(data, m-i, m+j-i, i, cmp)
j -= i
}
}
// i == j
swapRangeCmpFunc(data, m-i, m, i, cmp)
}


@ -1,481 +0,0 @@
// Code generated by gen_sort_variants.go; DO NOT EDIT.
// Copyright 2022 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package slices
import "golang.org/x/exp/constraints"
// insertionSortOrdered sorts data[a:b] using insertion sort.
func insertionSortOrdered[E constraints.Ordered](data []E, a, b int) {
for i := a + 1; i < b; i++ {
for j := i; j > a && cmpLess(data[j], data[j-1]); j-- {
data[j], data[j-1] = data[j-1], data[j]
}
}
}
// siftDownOrdered implements the heap property on data[lo:hi].
// first is an offset into the array where the root of the heap lies.
func siftDownOrdered[E constraints.Ordered](data []E, lo, hi, first int) {
root := lo
for {
child := 2*root + 1
if child >= hi {
break
}
if child+1 < hi && cmpLess(data[first+child], data[first+child+1]) {
child++
}
if !cmpLess(data[first+root], data[first+child]) {
return
}
data[first+root], data[first+child] = data[first+child], data[first+root]
root = child
}
}
func heapSortOrdered[E constraints.Ordered](data []E, a, b int) {
first := a
lo := 0
hi := b - a
// Build heap with greatest element at top.
for i := (hi - 1) / 2; i >= 0; i-- {
siftDownOrdered(data, i, hi, first)
}
// Pop elements, largest first, into end of data.
for i := hi - 1; i >= 0; i-- {
data[first], data[first+i] = data[first+i], data[first]
siftDownOrdered(data, lo, i, first)
}
}
// pdqsortOrdered sorts data[a:b].
// The algorithm based on pattern-defeating quicksort(pdqsort), but without the optimizations from BlockQuicksort.
// pdqsort paper: https://arxiv.org/pdf/2106.05123.pdf
// C++ implementation: https://github.com/orlp/pdqsort
// Rust implementation: https://docs.rs/pdqsort/latest/pdqsort/
// limit is the number of allowed bad (very unbalanced) pivots before falling back to heapsort.
func pdqsortOrdered[E constraints.Ordered](data []E, a, b, limit int) {
const maxInsertion = 12
var (
wasBalanced = true // whether the last partitioning was reasonably balanced
wasPartitioned = true // whether the slice was already partitioned
)
for {
length := b - a
if length <= maxInsertion {
insertionSortOrdered(data, a, b)
return
}
// Fall back to heapsort if too many bad choices were made.
if limit == 0 {
heapSortOrdered(data, a, b)
return
}
// If the last partitioning was imbalanced, we need to breaking patterns.
if !wasBalanced {
breakPatternsOrdered(data, a, b)
limit--
}
pivot, hint := choosePivotOrdered(data, a, b)
if hint == decreasingHint {
reverseRangeOrdered(data, a, b)
// The chosen pivot was pivot-a elements after the start of the array.
// After reversing it is pivot-a elements before the end of the array.
// The idea came from Rust's implementation.
pivot = (b - 1) - (pivot - a)
hint = increasingHint
}
// The slice is likely already sorted.
if wasBalanced && wasPartitioned && hint == increasingHint {
if partialInsertionSortOrdered(data, a, b) {
return
}
}
// Probably the slice contains many duplicate elements, partition the slice into
// elements equal to and elements greater than the pivot.
if a > 0 && !cmpLess(data[a-1], data[pivot]) {
mid := partitionEqualOrdered(data, a, b, pivot)
a = mid
continue
}
mid, alreadyPartitioned := partitionOrdered(data, a, b, pivot)
wasPartitioned = alreadyPartitioned
leftLen, rightLen := mid-a, b-mid
balanceThreshold := length / 8
if leftLen < rightLen {
wasBalanced = leftLen >= balanceThreshold
pdqsortOrdered(data, a, mid, limit)
a = mid + 1
} else {
wasBalanced = rightLen >= balanceThreshold
pdqsortOrdered(data, mid+1, b, limit)
b = mid
}
}
}
// partitionOrdered does one quicksort partition.
// Let p = data[pivot]
// Moves elements in data[a:b] around, so that data[i]<p and data[j]>=p for i<newpivot and j>newpivot.
// On return, data[newpivot] = p
func partitionOrdered[E constraints.Ordered](data []E, a, b, pivot int) (newpivot int, alreadyPartitioned bool) {
data[a], data[pivot] = data[pivot], data[a]
i, j := a+1, b-1 // i and j are inclusive of the elements remaining to be partitioned
for i <= j && cmpLess(data[i], data[a]) {
i++
}
for i <= j && !cmpLess(data[j], data[a]) {
j--
}
if i > j {
data[j], data[a] = data[a], data[j]
return j, true
}
data[i], data[j] = data[j], data[i]
i++
j--
for {
for i <= j && cmpLess(data[i], data[a]) {
i++
}
for i <= j && !cmpLess(data[j], data[a]) {
j--
}
if i > j {
break
}
data[i], data[j] = data[j], data[i]
i++
j--
}
data[j], data[a] = data[a], data[j]
return j, false
}
// partitionEqualOrdered partitions data[a:b] into elements equal to data[pivot] followed by elements greater than data[pivot].
// It assumed that data[a:b] does not contain elements smaller than the data[pivot].
func partitionEqualOrdered[E constraints.Ordered](data []E, a, b, pivot int) (newpivot int) {
data[a], data[pivot] = data[pivot], data[a]
i, j := a+1, b-1 // i and j are inclusive of the elements remaining to be partitioned
for {
for i <= j && !cmpLess(data[a], data[i]) {
i++
}
for i <= j && cmpLess(data[a], data[j]) {
j--
}
if i > j {
break
}
data[i], data[j] = data[j], data[i]
i++
j--
}
return i
}
// partialInsertionSortOrdered partially sorts a slice, returns true if the slice is sorted at the end.
func partialInsertionSortOrdered[E constraints.Ordered](data []E, a, b int) bool {
const (
maxSteps = 5 // maximum number of adjacent out-of-order pairs that will get shifted
shortestShifting = 50 // don't shift any elements on short arrays
)
i := a + 1
for j := 0; j < maxSteps; j++ {
for i < b && !cmpLess(data[i], data[i-1]) {
i++
}
if i == b {
return true
}
if b-a < shortestShifting {
return false
}
data[i], data[i-1] = data[i-1], data[i]
// Shift the smaller one to the left.
if i-a >= 2 {
for j := i - 1; j >= 1; j-- {
if !cmpLess(data[j], data[j-1]) {
break
}
data[j], data[j-1] = data[j-1], data[j]
}
}
// Shift the greater one to the right.
if b-i >= 2 {
for j := i + 1; j < b; j++ {
if !cmpLess(data[j], data[j-1]) {
break
}
data[j], data[j-1] = data[j-1], data[j]
}
}
}
return false
}
// breakPatternsOrdered scatters some elements around in an attempt to break some patterns
// that might cause imbalanced partitions in quicksort.
func breakPatternsOrdered[E constraints.Ordered](data []E, a, b int) {
length := b - a
if length >= 8 {
random := xorshift(length)
modulus := nextPowerOfTwo(length)
for idx := a + (length/4)*2 - 1; idx <= a+(length/4)*2+1; idx++ {
other := int(uint(random.Next()) & (modulus - 1))
if other >= length {
other -= length
}
data[idx], data[a+other] = data[a+other], data[idx]
}
}
}
// choosePivotOrdered chooses a pivot in data[a:b].
//
// [0,8): chooses a static pivot.
// [8,shortestNinther): uses the simple median-of-three method.
// [shortestNinther,∞): uses the Tukey ninther method.
func choosePivotOrdered[E constraints.Ordered](data []E, a, b int) (pivot int, hint sortedHint) {
const (
shortestNinther = 50
maxSwaps = 4 * 3
)
l := b - a
var (
swaps int
i = a + l/4*1
j = a + l/4*2
k = a + l/4*3
)
if l >= 8 {
if l >= shortestNinther {
// Tukey ninther method, the idea came from Rust's implementation.
i = medianAdjacentOrdered(data, i, &swaps)
j = medianAdjacentOrdered(data, j, &swaps)
k = medianAdjacentOrdered(data, k, &swaps)
}
// Find the median among i, j, k and stores it into j.
j = medianOrdered(data, i, j, k, &swaps)
}
switch swaps {
case 0:
return j, increasingHint
case maxSwaps:
return j, decreasingHint
default:
return j, unknownHint
}
}
// order2Ordered returns x,y where data[x] <= data[y], where x,y=a,b or x,y=b,a.
func order2Ordered[E constraints.Ordered](data []E, a, b int, swaps *int) (int, int) {
if cmpLess(data[b], data[a]) {
*swaps++
return b, a
}
return a, b
}
// medianOrdered returns x where data[x] is the median of data[a],data[b],data[c], where x is a, b, or c.
func medianOrdered[E constraints.Ordered](data []E, a, b, c int, swaps *int) int {
a, b = order2Ordered(data, a, b, swaps)
b, c = order2Ordered(data, b, c, swaps)
a, b = order2Ordered(data, a, b, swaps)
return b
}
// medianAdjacentOrdered finds the median of data[a - 1], data[a], data[a + 1] and stores the index into a.
func medianAdjacentOrdered[E constraints.Ordered](data []E, a int, swaps *int) int {
return medianOrdered(data, a-1, a, a+1, swaps)
}
func reverseRangeOrdered[E constraints.Ordered](data []E, a, b int) {
i := a
j := b - 1
for i < j {
data[i], data[j] = data[j], data[i]
i++
j--
}
}
func swapRangeOrdered[E constraints.Ordered](data []E, a, b, n int) {
for i := 0; i < n; i++ {
data[a+i], data[b+i] = data[b+i], data[a+i]
}
}
func stableOrdered[E constraints.Ordered](data []E, n int) {
blockSize := 20 // must be > 0
a, b := 0, blockSize
for b <= n {
insertionSortOrdered(data, a, b)
a = b
b += blockSize
}
insertionSortOrdered(data, a, n)
for blockSize < n {
a, b = 0, 2*blockSize
for b <= n {
symMergeOrdered(data, a, a+blockSize, b)
a = b
b += 2 * blockSize
}
if m := a + blockSize; m < n {
symMergeOrdered(data, a, m, n)
}
blockSize *= 2
}
}
// symMergeOrdered merges the two sorted subsequences data[a:m] and data[m:b] using
// the SymMerge algorithm from Pok-Son Kim and Arne Kutzner, "Stable Minimum
// Storage Merging by Symmetric Comparisons", in Susanne Albers and Tomasz
// Radzik, editors, Algorithms - ESA 2004, volume 3221 of Lecture Notes in
// Computer Science, pages 714-723. Springer, 2004.
//
// Let M = m-a and N = b-n. Wolog M < N.
// The recursion depth is bound by ceil(log(N+M)).
// The algorithm needs O(M*log(N/M + 1)) calls to data.Less.
// The algorithm needs O((M+N)*log(M)) calls to data.Swap.
//
// The paper gives O((M+N)*log(M)) as the number of assignments assuming a
// rotation algorithm which uses O(M+N+gcd(M+N)) assignments. The argumentation
// in the paper carries through for Swap operations, especially as the block
// swapping rotate uses only O(M+N) Swaps.
//
// symMerge assumes non-degenerate arguments: a < m && m < b.
// Having the caller check this condition eliminates many leaf recursion calls,
// which improves performance.
func symMergeOrdered[E constraints.Ordered](data []E, a, m, b int) {
// Avoid unnecessary recursions of symMerge
// by direct insertion of data[a] into data[m:b]
// if data[a:m] only contains one element.
if m-a == 1 {
// Use binary search to find the lowest index i
// such that data[i] >= data[a] for m <= i < b.
// Exit the search loop with i == b in case no such index exists.
i := m
j := b
for i < j {
h := int(uint(i+j) >> 1)
if cmpLess(data[h], data[a]) {
i = h + 1
} else {
j = h
}
}
// Swap values until data[a] reaches the position before i.
for k := a; k < i-1; k++ {
data[k], data[k+1] = data[k+1], data[k]
}
return
}
// Avoid unnecessary recursions of symMerge
// by direct insertion of data[m] into data[a:m]
// if data[m:b] only contains one element.
if b-m == 1 {
// Use binary search to find the lowest index i
// such that data[i] > data[m] for a <= i < m.
// Exit the search loop with i == m in case no such index exists.
i := a
j := m
for i < j {
h := int(uint(i+j) >> 1)
if !cmpLess(data[m], data[h]) {
i = h + 1
} else {
j = h
}
}
// Swap values until data[m] reaches the position i.
for k := m; k > i; k-- {
data[k], data[k-1] = data[k-1], data[k]
}
return
}
mid := int(uint(a+b) >> 1)
n := mid + m
var start, r int
if m > mid {
start = n - b
r = mid
} else {
start = a
r = m
}
p := n - 1
for start < r {
c := int(uint(start+r) >> 1)
if !cmpLess(data[p-c], data[c]) {
start = c + 1
} else {
r = c
}
}
end := n - start
if start < m && m < end {
rotateOrdered(data, start, m, end)
}
if a < start && start < mid {
symMergeOrdered(data, a, start, mid)
}
if mid < end && end < b {
symMergeOrdered(data, mid, end, b)
}
}
// rotateOrdered rotates two consecutive blocks u = data[a:m] and v = data[m:b] in data:
// Data of the form 'x u v y' is changed to 'x v u y'.
// rotate performs at most b-a many calls to data.Swap,
// and it assumes non-degenerate arguments: a < m && m < b.
func rotateOrdered[E constraints.Ordered](data []E, a, m, b int) {
i := m - a
j := b - m
for i != j {
if i > j {
swapRangeOrdered(data, m-i, m, j)
i -= j
} else {
swapRangeOrdered(data, m-i, m+j-i, i)
j -= i
}
}
// i == j
swapRangeOrdered(data, m-i, m, i)
}


@ -34,11 +34,19 @@ import (
)
var (
VerboseLogs bool
logFrameWrites bool
logFrameReads bool
inTests bool
disableExtendedConnectProtocol bool
VerboseLogs bool
logFrameWrites bool
logFrameReads bool
inTests bool
// Enabling extended CONNECT causes browsers to attempt to use
// WebSockets-over-HTTP/2. This results in problems when the server's websocket
// package doesn't support extended CONNECT.
//
// Disable extended CONNECT by default for now.
//
// Issue #71128.
disableExtendedConnectProtocol = true
)
func init() {
@ -51,8 +59,8 @@ func init() {
logFrameWrites = true
logFrameReads = true
}
if strings.Contains(e, "http2xconnect=0") {
disableExtendedConnectProtocol = true
if strings.Contains(e, "http2xconnect=1") {
disableExtendedConnectProtocol = false
}
}
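Note that the polarity of the GODEBUG knob flips with this change:
extended CONNECT is now disabled unless GODEBUG contains
http2xconnect=1, where previously it was enabled unless it contained
http2xconnect=0. A standalone sketch of the same check:

    package main

    import (
        "fmt"
        "os"
        "strings"
    )

    func main() {
        // Mirrors the init() above; run with GODEBUG=http2xconnect=1 to opt
        // back in to the extended CONNECT protocol.
        disabled := true
        if strings.Contains(os.Getenv("GODEBUG"), "http2xconnect=1") {
            disabled = false
        }
        fmt.Println("extended CONNECT disabled:", disabled)
    }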
@ -407,23 +415,6 @@ func (s *sorter) SortStrings(ss []string) {
s.v = save
}
// validPseudoPath reports whether v is a valid :path pseudo-header
// value. It must be either:
//
// - a non-empty string starting with '/'
// - the string '*', for OPTIONS requests.
//
// For now this is only used a quick check for deciding when to clean
// up Opaque URLs before sending requests from the Transport.
// See golang.org/issue/16847
//
// We used to enforce that the path also didn't start with "//", but
// Google's GFE accepts such paths and Chrome sends them, so ignore
// that part of the spec. See golang.org/issue/19103.
func validPseudoPath(v string) bool {
return (len(v) > 0 && v[0] == '/') || v == "*"
}
// incomparable is a zero-width, non-comparable type. Adding it to a struct
// makes that struct also non-comparable, and generally doesn't add
// any size (as long as it's first).


@ -50,6 +50,7 @@ import (
"golang.org/x/net/http/httpguts"
"golang.org/x/net/http2/hpack"
"golang.org/x/net/internal/httpcommon"
)
const (
@ -812,8 +813,7 @@ const maxCachedCanonicalHeadersKeysSize = 2048
func (sc *serverConn) canonicalHeader(v string) string {
sc.serveG.check()
buildCommonHeaderMapsOnce()
cv, ok := commonCanonHeader[v]
cv, ok := httpcommon.CachedCanonicalHeader(v)
if ok {
return cv
}


@ -25,7 +25,6 @@ import (
"net/http"
"net/http/httptrace"
"net/textproto"
"sort"
"strconv"
"strings"
"sync"
@ -35,6 +34,7 @@ import (
"golang.org/x/net/http/httpguts"
"golang.org/x/net/http2/hpack"
"golang.org/x/net/idna"
"golang.org/x/net/internal/httpcommon"
)
const (
@ -1275,23 +1275,6 @@ func (cc *ClientConn) closeForLostPing() {
// exported. At least they'll be DeepEqual for h1-vs-h2 comparisons tests.
var errRequestCanceled = errors.New("net/http: request canceled")
func commaSeparatedTrailers(req *http.Request) (string, error) {
keys := make([]string, 0, len(req.Trailer))
for k := range req.Trailer {
k = canonicalHeader(k)
switch k {
case "Transfer-Encoding", "Trailer", "Content-Length":
return "", fmt.Errorf("invalid Trailer key %q", k)
}
keys = append(keys, k)
}
if len(keys) > 0 {
sort.Strings(keys)
return strings.Join(keys, ","), nil
}
return "", nil
}
func (cc *ClientConn) responseHeaderTimeout() time.Duration {
if cc.t.t1 != nil {
return cc.t.t1.ResponseHeaderTimeout
@ -1303,35 +1286,6 @@ func (cc *ClientConn) responseHeaderTimeout() time.Duration {
return 0
}
// checkConnHeaders checks whether req has any invalid connection-level headers.
// per RFC 7540 section 8.1.2.2: Connection-Specific Header Fields.
// Certain headers are special-cased as okay but not transmitted later.
func checkConnHeaders(req *http.Request) error {
if v := req.Header.Get("Upgrade"); v != "" {
return fmt.Errorf("http2: invalid Upgrade request header: %q", req.Header["Upgrade"])
}
if vv := req.Header["Transfer-Encoding"]; len(vv) > 0 && (len(vv) > 1 || vv[0] != "" && vv[0] != "chunked") {
return fmt.Errorf("http2: invalid Transfer-Encoding request header: %q", vv)
}
if vv := req.Header["Connection"]; len(vv) > 0 && (len(vv) > 1 || vv[0] != "" && !asciiEqualFold(vv[0], "close") && !asciiEqualFold(vv[0], "keep-alive")) {
return fmt.Errorf("http2: invalid Connection request header: %q", vv)
}
return nil
}
// actualContentLength returns a sanitized version of
// req.ContentLength, where 0 actually means zero (not unknown) and -1
// means unknown.
func actualContentLength(req *http.Request) int64 {
if req.Body == nil || req.Body == http.NoBody {
return 0
}
if req.ContentLength != 0 {
return req.ContentLength
}
return -1
}
func (cc *ClientConn) decrStreamReservations() {
cc.mu.Lock()
defer cc.mu.Unlock()
@ -1356,7 +1310,7 @@ func (cc *ClientConn) roundTrip(req *http.Request, streamf func(*clientStream))
reqCancel: req.Cancel,
isHead: req.Method == "HEAD",
reqBody: req.Body,
reqBodyContentLength: actualContentLength(req),
reqBodyContentLength: httpcommon.ActualContentLength(req),
trace: httptrace.ContextClientTrace(ctx),
peerClosed: make(chan struct{}),
abort: make(chan struct{}),
@ -1364,25 +1318,7 @@ func (cc *ClientConn) roundTrip(req *http.Request, streamf func(*clientStream))
donec: make(chan struct{}),
}
// TODO(bradfitz): this is a copy of the logic in net/http. Unify somewhere?
if !cc.t.disableCompression() &&
req.Header.Get("Accept-Encoding") == "" &&
req.Header.Get("Range") == "" &&
!cs.isHead {
// Request gzip only, not deflate. Deflate is ambiguous and
// not as universally supported anyway.
// See: https://zlib.net/zlib_faq.html#faq39
//
// Note that we don't request this for HEAD requests,
// due to a bug in nginx:
// http://trac.nginx.org/nginx/ticket/358
// https://golang.org/issue/5522
//
// We don't request gzip if the request is for a range, since
// auto-decoding a portion of a gzipped document will just fail
// anyway. See https://golang.org/issue/8923
cs.requestedGzip = true
}
cs.requestedGzip = httpcommon.IsRequestGzip(req, cc.t.disableCompression())
go cs.doRequest(req, streamf)
@ -1413,7 +1349,7 @@ func (cc *ClientConn) roundTrip(req *http.Request, streamf func(*clientStream))
}
res.Request = req
res.TLS = cc.tlsState
if res.Body == noBody && actualContentLength(req) == 0 {
if res.Body == noBody && httpcommon.ActualContentLength(req) == 0 {
// If there isn't a request or response body still being
// written, then wait for the stream to be closed before
// RoundTrip returns.
@ -1496,10 +1432,6 @@ func (cs *clientStream) writeRequest(req *http.Request, streamf func(*clientStre
cc := cs.cc
ctx := cs.ctx
if err := checkConnHeaders(req); err != nil {
return err
}
// wait for setting frames to be received, a server can change this value later,
// but we just wait for the first settings frame
var isExtendedConnect bool
@ -1663,20 +1595,22 @@ func (cs *clientStream) encodeAndWriteHeaders(req *http.Request) error {
// we send: HEADERS{1}, CONTINUATION{0,} + DATA{0,} (DATA is
// sent by writeRequestBody below, along with any Trailers,
// again in form HEADERS{1}, CONTINUATION{0,})
trailers, err := commaSeparatedTrailers(req)
cc.hbuf.Reset()
res, err := httpcommon.EncodeHeaders(httpcommon.EncodeHeadersParam{
Request: req,
AddGzipHeader: cs.requestedGzip,
PeerMaxHeaderListSize: cc.peerMaxHeaderListSize,
DefaultUserAgent: defaultUserAgent,
}, func(name, value string) {
cc.writeHeader(name, value)
})
if err != nil {
return err
}
hasTrailers := trailers != ""
contentLen := actualContentLength(req)
hasBody := contentLen != 0
hdrs, err := cc.encodeHeaders(req, cs.requestedGzip, trailers, contentLen)
if err != nil {
return err
return fmt.Errorf("http2: %w", err)
}
hdrs := cc.hbuf.Bytes()
// Write the request.
endStream := !hasBody && !hasTrailers
endStream := !res.HasBody && !res.HasTrailers
cs.sentHeaders = true
err = cc.writeHeaders(cs.ID, endStream, int(cc.maxFrameSize), hdrs)
traceWroteHeaders(cs.trace)
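The hunk above swaps the transport's private encodeHeaders for the shared httpcommon.EncodeHeaders, which reports each validated header pair through a callback (here cc.writeHeader). A minimal runnable sketch of what that callback feeds, using only the public hpack package; the field values are illustrative, not the vendored code:

package main

import (
	"bytes"
	"fmt"

	"golang.org/x/net/http2/hpack"
)

func main() {
	var buf bytes.Buffer
	enc := hpack.NewEncoder(&buf)
	// Mirrors ClientConn.writeHeader: one WriteField call per pair,
	// accumulating the HEADERS frame payload (cc.hbuf upstream).
	writeHeader := func(name, value string) {
		_ = enc.WriteField(hpack.HeaderField{Name: name, Value: value})
	}
	writeHeader(":method", "GET")
	writeHeader(":path", "/")
	writeHeader(":scheme", "https")
	writeHeader(":authority", "example.com")
	fmt.Printf("%d bytes of HPACK-encoded header block\n", buf.Len())
}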
@ -2070,218 +2004,6 @@ func (cs *clientStream) awaitFlowControl(maxBytes int) (taken int32, err error)
}
}
func validateHeaders(hdrs http.Header) string {
for k, vv := range hdrs {
if !httpguts.ValidHeaderFieldName(k) && k != ":protocol" {
return fmt.Sprintf("name %q", k)
}
for _, v := range vv {
if !httpguts.ValidHeaderFieldValue(v) {
// Don't include the value in the error,
// because it may be sensitive.
return fmt.Sprintf("value for header %q", k)
}
}
}
return ""
}
var errNilRequestURL = errors.New("http2: Request.URI is nil")
func isNormalConnect(req *http.Request) bool {
return req.Method == "CONNECT" && req.Header.Get(":protocol") == ""
}
// requires cc.wmu be held.
func (cc *ClientConn) encodeHeaders(req *http.Request, addGzipHeader bool, trailers string, contentLength int64) ([]byte, error) {
cc.hbuf.Reset()
if req.URL == nil {
return nil, errNilRequestURL
}
host := req.Host
if host == "" {
host = req.URL.Host
}
host, err := httpguts.PunycodeHostPort(host)
if err != nil {
return nil, err
}
if !httpguts.ValidHostHeader(host) {
return nil, errors.New("http2: invalid Host header")
}
var path string
if !isNormalConnect(req) {
path = req.URL.RequestURI()
if !validPseudoPath(path) {
orig := path
path = strings.TrimPrefix(path, req.URL.Scheme+"://"+host)
if !validPseudoPath(path) {
if req.URL.Opaque != "" {
return nil, fmt.Errorf("invalid request :path %q from URL.Opaque = %q", orig, req.URL.Opaque)
} else {
return nil, fmt.Errorf("invalid request :path %q", orig)
}
}
}
}
// Check for any invalid headers+trailers and return an error before we
// potentially pollute our hpack state. (We want to be able to
// continue to reuse the hpack encoder for future requests)
if err := validateHeaders(req.Header); err != "" {
return nil, fmt.Errorf("invalid HTTP header %s", err)
}
if err := validateHeaders(req.Trailer); err != "" {
return nil, fmt.Errorf("invalid HTTP trailer %s", err)
}
enumerateHeaders := func(f func(name, value string)) {
// 8.1.2.3 Request Pseudo-Header Fields
// The :path pseudo-header field includes the path and query parts of the
// target URI (the path-absolute production and optionally a '?' character
// followed by the query production, see Sections 3.3 and 3.4 of
// [RFC3986]).
f(":authority", host)
m := req.Method
if m == "" {
m = http.MethodGet
}
f(":method", m)
if !isNormalConnect(req) {
f(":path", path)
f(":scheme", req.URL.Scheme)
}
if trailers != "" {
f("trailer", trailers)
}
var didUA bool
for k, vv := range req.Header {
if asciiEqualFold(k, "host") || asciiEqualFold(k, "content-length") {
// Host is :authority, already sent.
// Content-Length is automatic, set below.
continue
} else if asciiEqualFold(k, "connection") ||
asciiEqualFold(k, "proxy-connection") ||
asciiEqualFold(k, "transfer-encoding") ||
asciiEqualFold(k, "upgrade") ||
asciiEqualFold(k, "keep-alive") {
// Per 8.1.2.2 Connection-Specific Header
// Fields, don't send connection-specific
// fields. We have already checked if any
// are error-worthy so just ignore the rest.
continue
} else if asciiEqualFold(k, "user-agent") {
// Match Go's http1 behavior: at most one
// User-Agent. If set to nil or empty string,
// then omit it. Otherwise if not mentioned,
// include the default (below).
didUA = true
if len(vv) < 1 {
continue
}
vv = vv[:1]
if vv[0] == "" {
continue
}
} else if asciiEqualFold(k, "cookie") {
// Per 8.1.2.5 To allow for better compression efficiency, the
// Cookie header field MAY be split into separate header fields,
// each with one or more cookie-pairs.
for _, v := range vv {
for {
p := strings.IndexByte(v, ';')
if p < 0 {
break
}
f("cookie", v[:p])
p++
// strip space after semicolon if any.
for p+1 <= len(v) && v[p] == ' ' {
p++
}
v = v[p:]
}
if len(v) > 0 {
f("cookie", v)
}
}
continue
}
for _, v := range vv {
f(k, v)
}
}
if shouldSendReqContentLength(req.Method, contentLength) {
f("content-length", strconv.FormatInt(contentLength, 10))
}
if addGzipHeader {
f("accept-encoding", "gzip")
}
if !didUA {
f("user-agent", defaultUserAgent)
}
}
// Do a first pass over the headers counting bytes to ensure
// we don't exceed cc.peerMaxHeaderListSize. This is done as a
// separate pass before encoding the headers to prevent
// modifying the hpack state.
hlSize := uint64(0)
enumerateHeaders(func(name, value string) {
hf := hpack.HeaderField{Name: name, Value: value}
hlSize += uint64(hf.Size())
})
if hlSize > cc.peerMaxHeaderListSize {
return nil, errRequestHeaderListSize
}
trace := httptrace.ContextClientTrace(req.Context())
traceHeaders := traceHasWroteHeaderField(trace)
// Header list size is ok. Write the headers.
enumerateHeaders(func(name, value string) {
name, ascii := lowerHeader(name)
if !ascii {
// Skip writing invalid headers. Per RFC 7540, Section 8.1.2, header
// field names have to be ASCII characters (just as in HTTP/1.x).
return
}
cc.writeHeader(name, value)
if traceHeaders {
traceWroteHeaderField(trace, name, value)
}
})
return cc.hbuf.Bytes(), nil
}
// shouldSendReqContentLength reports whether the http2.Transport should send
// a "content-length" request header. This logic is basically a copy of the net/http
// transferWriter.shouldSendContentLength.
// The contentLength is the corrected contentLength (so 0 means actually 0, not unknown).
// -1 means unknown.
func shouldSendReqContentLength(method string, contentLength int64) bool {
if contentLength > 0 {
return true
}
if contentLength < 0 {
return false
}
// For zero bodies, whether we send a content-length depends on the method.
// It also kinda doesn't matter for http2 either way, with END_STREAM.
switch method {
case "POST", "PUT", "PATCH":
return true
default:
return false
}
}
// requires cc.wmu be held.
func (cc *ClientConn) encodeTrailers(trailer http.Header) ([]byte, error) {
cc.hbuf.Reset()
@ -2298,7 +2020,7 @@ func (cc *ClientConn) encodeTrailers(trailer http.Header) ([]byte, error) {
}
for k, vv := range trailer {
lowKey, ascii := lowerHeader(k)
lowKey, ascii := httpcommon.LowerHeader(k)
if !ascii {
// Skip writing invalid headers. Per RFC 7540, Section 8.1.2, header
// field names have to be ASCII characters (just as in HTTP/1.x).
@ -2653,7 +2375,7 @@ func (rl *clientConnReadLoop) handleResponse(cs *clientStream, f *MetaHeadersFra
Status: status + " " + http.StatusText(statusCode),
}
for _, hf := range regularFields {
key := canonicalHeader(hf.Name)
key := httpcommon.CanonicalHeader(hf.Name)
if key == "Trailer" {
t := res.Trailer
if t == nil {
@ -2661,7 +2383,7 @@ func (rl *clientConnReadLoop) handleResponse(cs *clientStream, f *MetaHeadersFra
res.Trailer = t
}
foreachHeaderElement(hf.Value, func(v string) {
t[canonicalHeader(v)] = nil
t[httpcommon.CanonicalHeader(v)] = nil
})
} else {
vv := header[key]
@ -2785,7 +2507,7 @@ func (rl *clientConnReadLoop) processTrailers(cs *clientStream, f *MetaHeadersFr
trailer := make(http.Header)
for _, hf := range f.RegularFields() {
key := canonicalHeader(hf.Name)
key := httpcommon.CanonicalHeader(hf.Name)
trailer[key] = append(trailer[key], hf.Value)
}
cs.trailer = trailer
@ -3331,7 +3053,7 @@ func (cc *ClientConn) writeStreamReset(streamID uint32, code ErrCode, ping bool,
var (
errResponseHeaderListSize = errors.New("http2: response header list larger than advertised limit")
errRequestHeaderListSize = errors.New("http2: request header list larger than peer's advertised limit")
errRequestHeaderListSize = httpcommon.ErrRequestHeaderListSize
)
func (cc *ClientConn) logf(format string, args ...interface{}) {
@ -3515,16 +3237,6 @@ func traceFirstResponseByte(trace *httptrace.ClientTrace) {
}
}
func traceHasWroteHeaderField(trace *httptrace.ClientTrace) bool {
return trace != nil && trace.WroteHeaderField != nil
}
func traceWroteHeaderField(trace *httptrace.ClientTrace, k, v string) {
if trace != nil && trace.WroteHeaderField != nil {
trace.WroteHeaderField(k, []string{v})
}
}
func traceGot1xxResponseFunc(trace *httptrace.ClientTrace) func(int, textproto.MIMEHeader) error {
if trace != nil {
return trace.Got1xxResponse


@ -13,6 +13,7 @@ import (
"golang.org/x/net/http/httpguts"
"golang.org/x/net/http2/hpack"
"golang.org/x/net/internal/httpcommon"
)
// writeFramer is implemented by any type that is used to write frames.
@ -351,7 +352,7 @@ func encodeHeaders(enc *hpack.Encoder, h http.Header, keys []string) {
}
for _, k := range keys {
vv := h[k]
k, ascii := lowerHeader(k)
k, ascii := httpcommon.LowerHeader(k)
if !ascii {
// Skip writing invalid headers. Per RFC 7540, Section 8.1.2, header
// field names have to be ASCII characters (just as in HTTP/1.x).

vendor/golang.org/x/net/internal/httpcommon/ascii.go generated vendored Normal file

@ -0,0 +1,53 @@
// Copyright 2025 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package httpcommon
import "strings"
// The HTTP protocols are defined in terms of ASCII, not Unicode. This file
// contains helper functions which may use Unicode-aware functions which would
// otherwise be unsafe and could introduce vulnerabilities if used improperly.
// asciiEqualFold is strings.EqualFold, ASCII only. It reports whether s and t
// are equal, ASCII-case-insensitively.
func asciiEqualFold(s, t string) bool {
if len(s) != len(t) {
return false
}
for i := 0; i < len(s); i++ {
if lower(s[i]) != lower(t[i]) {
return false
}
}
return true
}
// lower returns the ASCII lowercase version of b.
func lower(b byte) byte {
if 'A' <= b && b <= 'Z' {
return b + ('a' - 'A')
}
return b
}
// isASCIIPrint returns whether s is ASCII and printable according to
// https://tools.ietf.org/html/rfc20#section-4.2.
func isASCIIPrint(s string) bool {
for i := 0; i < len(s); i++ {
if s[i] < ' ' || s[i] > '~' {
return false
}
}
return true
}
// asciiToLower returns the lowercase version of s if s is ASCII and printable,
// and whether or not it was.
func asciiToLower(s string) (lower string, ok bool) {
if !isASCIIPrint(s) {
return "", false
}
return strings.ToLower(s), true
}
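An ASCII-only fold is deliberate here: strings.EqualFold applies Unicode simple case folding, under which the Kelvin sign U+212A matches 'k', and that must never happen for HTTP header bytes. A small illustration (the header strings are hypothetical):

package main

import (
	"fmt"
	"strings"
)

func main() {
	kelvin := "\u212Aeep-Alive" // starts with the Kelvin sign, not ASCII 'K'
	fmt.Println(strings.EqualFold("keep-alive", kelvin)) // true: Unicode folding
	// asciiEqualFold above would report false here, because U+212A is
	// outside the 'A'..'Z' range handled by lower().
}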


@ -1,8 +1,8 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Copyright 2025 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package http2
package httpcommon
import (
"net/http"
@ -88,7 +88,9 @@ func buildCommonHeaderMaps() {
}
}
func lowerHeader(v string) (lower string, ascii bool) {
// LowerHeader returns the lowercase form of a header name,
// used on the wire for HTTP/2 and HTTP/3 requests.
func LowerHeader(v string) (lower string, ascii bool) {
buildCommonHeaderMapsOnce()
if s, ok := commonLowerHeader[v]; ok {
return s, true
@ -96,10 +98,18 @@ func lowerHeader(v string) (lower string, ascii bool) {
return asciiToLower(v)
}
func canonicalHeader(v string) string {
// CanonicalHeader canonicalizes a header name. (For example, "host" becomes "Host".)
func CanonicalHeader(v string) string {
buildCommonHeaderMapsOnce()
if s, ok := commonCanonHeader[v]; ok {
return s
}
return http.CanonicalHeaderKey(v)
}
// CachedCanonicalHeader returns the canonical form of a well-known header name.
func CachedCanonicalHeader(v string) (string, bool) {
buildCommonHeaderMapsOnce()
s, ok := commonCanonHeader[v]
return s, ok
}
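httpcommon is an internal package, so outside code cannot import it; the mapping these helpers implement can still be sketched with their public fallbacks (an illustration, not the vendored functions):

package main

import (
	"fmt"
	"net/http"
	"strings"
)

func main() {
	// CanonicalHeader falls back to http.CanonicalHeaderKey for names
	// missing from the precomputed commonCanonHeader map.
	fmt.Println(http.CanonicalHeaderKey("content-type")) // Content-Type
	// LowerHeader produces the lowercase wire form used by HTTP/2 and HTTP/3.
	fmt.Println(strings.ToLower("Content-Type")) // content-type
}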

vendor/golang.org/x/net/internal/httpcommon/request.go generated vendored Normal file

@ -0,0 +1,379 @@
// Copyright 2025 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package httpcommon
import (
"errors"
"fmt"
"net/http"
"net/http/httptrace"
"sort"
"strconv"
"strings"
"golang.org/x/net/http/httpguts"
"golang.org/x/net/http2/hpack"
)
var (
ErrRequestHeaderListSize = errors.New("request header list larger than peer's advertised limit")
)
// EncodeHeadersParam is parameters to EncodeHeaders.
type EncodeHeadersParam struct {
Request *http.Request
// AddGzipHeader indicates that an "accept-encoding: gzip" header should be
// added to the request.
AddGzipHeader bool
// PeerMaxHeaderListSize, when non-zero, is the peer's MAX_HEADER_LIST_SIZE setting.
PeerMaxHeaderListSize uint64
// DefaultUserAgent is the User-Agent header to send when the request
// neither contains a User-Agent nor disables it.
DefaultUserAgent string
}
// EncodeHeadersResult is the result of EncodeHeaders.
type EncodeHeadersResult struct {
HasBody bool
HasTrailers bool
}
// EncodeHeaders constructs request headers common to HTTP/2 and HTTP/3.
// It validates a request and calls headerf with each pseudo-header and header
// for the request.
// The headerf function is called with the validated, canonicalized header name.
func EncodeHeaders(param EncodeHeadersParam, headerf func(name, value string)) (res EncodeHeadersResult, _ error) {
req := param.Request
// Check for invalid connection-level headers.
if err := checkConnHeaders(req); err != nil {
return res, err
}
if req.URL == nil {
return res, errors.New("Request.URL is nil")
}
host := req.Host
if host == "" {
host = req.URL.Host
}
host, err := httpguts.PunycodeHostPort(host)
if err != nil {
return res, err
}
if !httpguts.ValidHostHeader(host) {
return res, errors.New("invalid Host header")
}
// isNormalConnect is true if this is a non-extended CONNECT request.
isNormalConnect := false
protocol := req.Header.Get(":protocol")
if req.Method == "CONNECT" && protocol == "" {
isNormalConnect = true
} else if protocol != "" && req.Method != "CONNECT" {
return res, errors.New("invalid :protocol header in non-CONNECT request")
}
// Validate the path, except for non-extended CONNECT requests which have no path.
var path string
if !isNormalConnect {
path = req.URL.RequestURI()
if !validPseudoPath(path) {
orig := path
path = strings.TrimPrefix(path, req.URL.Scheme+"://"+host)
if !validPseudoPath(path) {
if req.URL.Opaque != "" {
return res, fmt.Errorf("invalid request :path %q from URL.Opaque = %q", orig, req.URL.Opaque)
} else {
return res, fmt.Errorf("invalid request :path %q", orig)
}
}
}
}
// Check for any invalid headers+trailers and return an error before we
// potentially pollute our hpack state. (We want to be able to
// continue to reuse the hpack encoder for future requests)
if err := validateHeaders(req.Header); err != "" {
return res, fmt.Errorf("invalid HTTP header %s", err)
}
if err := validateHeaders(req.Trailer); err != "" {
return res, fmt.Errorf("invalid HTTP trailer %s", err)
}
contentLength := ActualContentLength(req)
trailers, err := commaSeparatedTrailers(req)
if err != nil {
return res, err
}
enumerateHeaders := func(f func(name, value string)) {
// 8.1.2.3 Request Pseudo-Header Fields
// The :path pseudo-header field includes the path and query parts of the
// target URI (the path-absolute production and optionally a '?' character
// followed by the query production, see Sections 3.3 and 3.4 of
// [RFC3986]).
f(":authority", host)
m := req.Method
if m == "" {
m = http.MethodGet
}
f(":method", m)
if !isNormalConnect {
f(":path", path)
f(":scheme", req.URL.Scheme)
}
if protocol != "" {
f(":protocol", protocol)
}
if trailers != "" {
f("trailer", trailers)
}
var didUA bool
for k, vv := range req.Header {
if asciiEqualFold(k, "host") || asciiEqualFold(k, "content-length") {
// Host is :authority, already sent.
// Content-Length is automatic, set below.
continue
} else if asciiEqualFold(k, "connection") ||
asciiEqualFold(k, "proxy-connection") ||
asciiEqualFold(k, "transfer-encoding") ||
asciiEqualFold(k, "upgrade") ||
asciiEqualFold(k, "keep-alive") {
// Per 8.1.2.2 Connection-Specific Header
// Fields, don't send connection-specific
// fields. We have already checked if any
// are error-worthy so just ignore the rest.
continue
} else if asciiEqualFold(k, "user-agent") {
// Match Go's http1 behavior: at most one
// User-Agent. If set to nil or empty string,
// then omit it. Otherwise if not mentioned,
// include the default (below).
didUA = true
if len(vv) < 1 {
continue
}
vv = vv[:1]
if vv[0] == "" {
continue
}
} else if asciiEqualFold(k, "cookie") {
// Per 8.1.2.5 To allow for better compression efficiency, the
// Cookie header field MAY be split into separate header fields,
// each with one or more cookie-pairs.
for _, v := range vv {
for {
p := strings.IndexByte(v, ';')
if p < 0 {
break
}
f("cookie", v[:p])
p++
// strip space after semicolon if any.
for p+1 <= len(v) && v[p] == ' ' {
p++
}
v = v[p:]
}
if len(v) > 0 {
f("cookie", v)
}
}
continue
} else if k == ":protocol" {
// :protocol pseudo-header was already sent above.
continue
}
for _, v := range vv {
f(k, v)
}
}
if shouldSendReqContentLength(req.Method, contentLength) {
f("content-length", strconv.FormatInt(contentLength, 10))
}
if param.AddGzipHeader {
f("accept-encoding", "gzip")
}
if !didUA {
f("user-agent", param.DefaultUserAgent)
}
}
// Do a first pass over the headers counting bytes to ensure
// we don't exceed cc.peerMaxHeaderListSize. This is done as a
// separate pass before encoding the headers to prevent
// modifying the hpack state.
if param.PeerMaxHeaderListSize > 0 {
hlSize := uint64(0)
enumerateHeaders(func(name, value string) {
hf := hpack.HeaderField{Name: name, Value: value}
hlSize += uint64(hf.Size())
})
if hlSize > param.PeerMaxHeaderListSize {
return res, ErrRequestHeaderListSize
}
}
trace := httptrace.ContextClientTrace(req.Context())
// Header list size is ok. Write the headers.
enumerateHeaders(func(name, value string) {
name, ascii := LowerHeader(name)
if !ascii {
// Skip writing invalid headers. Per RFC 7540, Section 8.1.2, header
// field names have to be ASCII characters (just as in HTTP/1.x).
return
}
headerf(name, value)
if trace != nil && trace.WroteHeaderField != nil {
trace.WroteHeaderField(name, []string{value})
}
})
res.HasBody = contentLength != 0
res.HasTrailers = trailers != ""
return res, nil
}
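The two enumerateHeaders passes are the load-bearing trick: measure everything against PeerMaxHeaderListSize first, and only then touch the stateful HPACK encoder. A self-contained sketch of that pattern under assumed names (the +32 per field is the overhead hpack.HeaderField.Size defines, per RFC 7541):

package main

import (
	"errors"
	"fmt"
)

var errHeaderListTooLarge = errors.New("request header list larger than peer's advertised limit")

// encodeWithLimit runs enumerate twice: a dry run that only sums HPACK
// sizes, then a real run that emits each field.
func encodeWithLimit(enumerate func(f func(name, value string)), limit uint64, emit func(name, value string)) error {
	var size uint64
	enumerate(func(name, value string) {
		size += uint64(len(name)) + uint64(len(value)) + 32 // hpack.HeaderField.Size
	})
	if limit > 0 && size > limit {
		return errHeaderListTooLarge // encoder state untouched
	}
	enumerate(emit)
	return nil
}

func main() {
	enumerate := func(f func(name, value string)) {
		f(":method", "GET")
		f(":path", "/")
		f("user-agent", "example")
	}
	err := encodeWithLimit(enumerate, 4096, func(name, value string) {
		fmt.Printf("%s: %s\n", name, value)
	})
	fmt.Println("err:", err)
}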
// IsRequestGzip reports whether we should add an Accept-Encoding: gzip header
// for a request.
func IsRequestGzip(req *http.Request, disableCompression bool) bool {
// TODO(bradfitz): this is a copy of the logic in net/http. Unify somewhere?
if !disableCompression &&
req.Header.Get("Accept-Encoding") == "" &&
req.Header.Get("Range") == "" &&
req.Method != "HEAD" {
// Request gzip only, not deflate. Deflate is ambiguous and
// not as universally supported anyway.
// See: https://zlib.net/zlib_faq.html#faq39
//
// Note that we don't request this for HEAD requests,
// due to a bug in nginx:
// http://trac.nginx.org/nginx/ticket/358
// https://golang.org/issue/5522
//
// We don't request gzip if the request is for a range, since
// auto-decoding a portion of a gzipped document will just fail
// anyway. See https://golang.org/issue/8923
return true
}
return false
}
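A runnable sketch of the same predicate, reimplemented locally since the package is internal; it exercises the three opt-outs the comment above cites:

package main

import (
	"fmt"
	"net/http"
)

func isRequestGzip(req *http.Request, disableCompression bool) bool {
	return !disableCompression &&
		req.Header.Get("Accept-Encoding") == "" &&
		req.Header.Get("Range") == "" &&
		req.Method != "HEAD"
}

func main() {
	get, _ := http.NewRequest("GET", "https://example.com/", nil)
	fmt.Println(isRequestGzip(get, false)) // true

	head, _ := http.NewRequest("HEAD", "https://example.com/", nil)
	fmt.Println(isRequestGzip(head, false)) // false: nginx HEAD+gzip bug

	ranged, _ := http.NewRequest("GET", "https://example.com/", nil)
	ranged.Header.Set("Range", "bytes=0-99")
	fmt.Println(isRequestGzip(ranged, false)) // false: a gzipped range can't be decoded alone
}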
// checkConnHeaders checks whether req has any invalid connection-level headers.
//
// https://www.rfc-editor.org/rfc/rfc9114.html#section-4.2-3
// https://www.rfc-editor.org/rfc/rfc9113.html#section-8.2.2-1
//
// Certain headers are special-cased as okay but not transmitted later.
// For example, we allow "Transfer-Encoding: chunked", but drop the header when encoding.
func checkConnHeaders(req *http.Request) error {
if v := req.Header.Get("Upgrade"); v != "" {
return fmt.Errorf("invalid Upgrade request header: %q", req.Header["Upgrade"])
}
if vv := req.Header["Transfer-Encoding"]; len(vv) > 0 && (len(vv) > 1 || vv[0] != "" && vv[0] != "chunked") {
return fmt.Errorf("invalid Transfer-Encoding request header: %q", vv)
}
if vv := req.Header["Connection"]; len(vv) > 0 && (len(vv) > 1 || vv[0] != "" && !asciiEqualFold(vv[0], "close") && !asciiEqualFold(vv[0], "keep-alive")) {
return fmt.Errorf("invalid Connection request header: %q", vv)
}
return nil
}
func commaSeparatedTrailers(req *http.Request) (string, error) {
keys := make([]string, 0, len(req.Trailer))
for k := range req.Trailer {
k = CanonicalHeader(k)
switch k {
case "Transfer-Encoding", "Trailer", "Content-Length":
return "", fmt.Errorf("invalid Trailer key %q", k)
}
keys = append(keys, k)
}
if len(keys) > 0 {
sort.Strings(keys)
return strings.Join(keys, ","), nil
}
return "", nil
}
// ActualContentLength returns a sanitized version of
// req.ContentLength, where 0 actually means zero (not unknown) and -1
// means unknown.
func ActualContentLength(req *http.Request) int64 {
if req.Body == nil || req.Body == http.NoBody {
return 0
}
if req.ContentLength != 0 {
return req.ContentLength
}
return -1
}
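The 0 / -1 convention drives both the END_STREAM decision and the content-length header downstream. A sketch of the contract against stock net/http requests (local copy of the function, since httpcommon is internal):

package main

import (
	"fmt"
	"net/http"
	"strings"
)

func actualContentLength(req *http.Request) int64 {
	if req.Body == nil || req.Body == http.NoBody {
		return 0
	}
	if req.ContentLength != 0 {
		return req.ContentLength
	}
	return -1
}

func main() {
	r1, _ := http.NewRequest("GET", "https://example.com/", nil)
	fmt.Println(actualContentLength(r1)) // 0: no body at all

	r2, _ := http.NewRequest("POST", "https://example.com/", strings.NewReader("hi"))
	fmt.Println(actualContentLength(r2)) // 2: NewRequest measured the reader

	r3, _ := http.NewRequest("POST", "https://example.com/", http.NoBody)
	fmt.Println(actualContentLength(r3)) // 0: NoBody means definitely empty
}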
// validPseudoPath reports whether v is a valid :path pseudo-header
// value. It must be either:
//
// - a non-empty string starting with '/'
// - the string '*', for OPTIONS requests.
//
// For now this is only used as a quick check for deciding when to clean
// up Opaque URLs before sending requests from the Transport.
// See golang.org/issue/16847
//
// We used to enforce that the path also didn't start with "//", but
// Google's GFE accepts such paths and Chrome sends them, so ignore
// that part of the spec. See golang.org/issue/19103.
func validPseudoPath(v string) bool {
return (len(v) > 0 && v[0] == '/') || v == "*"
}
func validateHeaders(hdrs http.Header) string {
for k, vv := range hdrs {
if !httpguts.ValidHeaderFieldName(k) && k != ":protocol" {
return fmt.Sprintf("name %q", k)
}
for _, v := range vv {
if !httpguts.ValidHeaderFieldValue(v) {
// Don't include the value in the error,
// because it may be sensitive.
return fmt.Sprintf("value for header %q", k)
}
}
}
return ""
}
// shouldSendReqContentLength reports whether we should send
// a "content-length" request header. This logic is basically a copy of the net/http
// transferWriter.shouldSendContentLength.
// The contentLength is the corrected contentLength (so 0 means actually 0, not unknown).
// -1 means unknown.
func shouldSendReqContentLength(method string, contentLength int64) bool {
if contentLength > 0 {
return true
}
if contentLength < 0 {
return false
}
// For zero bodies, whether we send a content-length depends on the method.
// It also kinda doesn't matter for http2 either way, with END_STREAM.
switch method {
case "POST", "PUT", "PATCH":
return true
default:
return false
}
}
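A quick check of the table this encodes, using a local copy of the function above; only conventionally bodied methods get an explicit zero:

package main

import "fmt"

func shouldSendReqContentLength(method string, contentLength int64) bool {
	if contentLength > 0 {
		return true
	}
	if contentLength < 0 {
		return false // unknown length: no header
	}
	switch method {
	case "POST", "PUT", "PATCH":
		return true
	default:
		return false
	}
}

func main() {
	fmt.Println(shouldSendReqContentLength("POST", 0)) // true: content-length: 0
	fmt.Println(shouldSendReqContentLength("GET", 0))  // false: omitted
	fmt.Println(shouldSendReqContentLength("GET", -1)) // false: length unknown
}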


@ -251,6 +251,12 @@ func FindDefaultCredentials(ctx context.Context, scopes ...string) (*Credentials
// a Google Developers service account key file, a gcloud user credentials file (a.k.a. refresh
// token JSON), or the JSON configuration file for workload identity federation in non-Google cloud
// platforms (see https://cloud.google.com/iam/docs/how-to#using-workload-identity-federation).
//
// Important: If you accept a credential configuration (credential JSON/File/Stream) from an
// external source for authentication to Google Cloud Platform, you must validate it before
// providing it to any Google API or library. Providing an unvalidated credential configuration to
// Google APIs can compromise the security of your systems and data. For more information, refer to
// [Validate credential configurations from external sources](https://cloud.google.com/docs/authentication/external/externally-sourced-credentials).
func CredentialsFromJSONWithParams(ctx context.Context, jsonData []byte, params CredentialsParams) (*Credentials, error) {
// Make defensive copy of the slices in params.
params = params.deepCopy()
@ -294,6 +300,12 @@ func CredentialsFromJSONWithParams(ctx context.Context, jsonData []byte, params
}
// CredentialsFromJSON invokes CredentialsFromJSONWithParams with the specified scopes.
//
// Important: If you accept a credential configuration (credential JSON/File/Stream) from an
// external source for authentication to Google Cloud Platform, you must validate it before
// providing it to any Google API or library. Providing an unvalidated credential configuration to
// Google APIs can compromise the security of your systems and data. For more information, refer to
// [Validate credential configurations from external sources](https://cloud.google.com/docs/authentication/external/externally-sourced-credentials).
func CredentialsFromJSON(ctx context.Context, jsonData []byte, scopes ...string) (*Credentials, error) {
var params CredentialsParams
params.Scopes = scopes


@ -278,20 +278,52 @@ type Format struct {
type CredentialSource struct {
// File is the location for file sourced credentials.
// One field amongst File, URL, Executable, or EnvironmentID should be provided, depending on the kind of credential in question.
//
// Important: If you accept a credential configuration (credential
// JSON/File/Stream) from an external source for authentication to Google
// Cloud Platform, you must validate it before providing it to any Google
// API or library. Providing an unvalidated credential configuration to
// Google APIs can compromise the security of your systems and data. For
// more information, refer to [Validate credential configurations from
// external sources](https://cloud.google.com/docs/authentication/external/externally-sourced-credentials).
File string `json:"file"`
// Url is the URL to call for URL sourced credentials.
// One field amongst File, URL, Executable, or EnvironmentID should be provided, depending on the kind of credential in question.
//
// Important: If you accept a credential configuration (credential
// JSON/File/Stream) from an external source for authentication to Google
// Cloud Platform, you must validate it before providing it to any Google
// API or library. Providing an unvalidated credential configuration to
// Google APIs can compromise the security of your systems and data. For
// more information, refer to [Validate credential configurations from
// external sources](https://cloud.google.com/docs/authentication/external/externally-sourced-credentials).
URL string `json:"url"`
// Headers are the headers to attach to the request for URL sourced credentials.
Headers map[string]string `json:"headers"`
// Executable is the configuration object for executable sourced credentials.
// One field amongst File, URL, Executable, or EnvironmentID should be provided, depending on the kind of credential in question.
//
// Important: If you accept a credential configuration (credential
// JSON/File/Stream) from an external source for authentication to Google
// Cloud Platform, you must validate it before providing it to any Google
// API or library. Providing an unvalidated credential configuration to
// Google APIs can compromise the security of your systems and data. For
// more information, refer to [Validate credential configurations from
// external sources](https://cloud.google.com/docs/authentication/external/externally-sourced-credentials).
Executable *ExecutableConfig `json:"executable"`
// EnvironmentID is the EnvironmentID used for AWS sourced credentials. This should start with "AWS".
// One field amongst File, URL, Executable, or EnvironmentID should be provided, depending on the kind of credential in question.
//
// Important: If you accept a credential configuration (credential
// JSON/File/Stream) from an external source for authentication to Google
// Cloud Platform, you must validate it before providing it to any Google
// API or library. Providing an unvalidated credential configuration to
// Google APIs can compromise the security of your systems and data. For
// more information, refer to [Validate credential configurations from
// external sources](https://cloud.google.com/docs/authentication/external/externally-sourced-credentials).
EnvironmentID string `json:"environment_id"`
// RegionURL is the metadata URL to retrieve the region from for EC2 AWS credentials.
RegionURL string `json:"region_url"`


@ -118,6 +118,7 @@ func (g *Group) TryGo(f func() error) bool {
// SetLimit limits the number of active goroutines in this group to at most n.
// A negative value indicates no limit.
// A limit of zero will prevent any new goroutines from being added.
//
// Any subsequent call to the Go method will block until it can add an active
// goroutine without exceeding the configured limit.

vendor/golang.org/x/sys/cpu/cpu.go generated vendored

@ -72,6 +72,9 @@ var X86 struct {
HasSSSE3 bool // Supplemental streaming SIMD extension 3
HasSSE41 bool // Streaming SIMD extension 4 and 4.1
HasSSE42 bool // Streaming SIMD extension 4 and 4.2
HasAVXIFMA bool // Advanced vector extension Integer Fused Multiply Add
HasAVXVNNI bool // Advanced vector extension Vector Neural Network Instructions
HasAVXVNNIInt8 bool // Advanced vector extension Vector Neural Network Int8 instructions
_ CacheLinePad
}
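The new bits are consumed as runtime gates; a minimal sketch (the kernel names are hypothetical):

package main

import (
	"fmt"

	"golang.org/x/sys/cpu"
)

func main() {
	if cpu.X86.HasAVXVNNI {
		fmt.Println("using the AVX-VNNI int8 dot-product kernel")
	} else {
		fmt.Println("falling back to the scalar kernel")
	}
}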


@ -53,6 +53,9 @@ func initOptions() {
{Name: "sse41", Feature: &X86.HasSSE41},
{Name: "sse42", Feature: &X86.HasSSE42},
{Name: "ssse3", Feature: &X86.HasSSSE3},
{Name: "avxifma", Feature: &X86.HasAVXIFMA},
{Name: "avxvnni", Feature: &X86.HasAVXVNNI},
{Name: "avxvnniint8", Feature: &X86.HasAVXVNNIInt8},
// These capabilities should always be enabled on amd64:
{Name: "sse2", Feature: &X86.HasSSE2, Required: runtime.GOARCH == "amd64"},
@ -106,7 +109,7 @@ func archInit() {
return
}
_, ebx7, ecx7, edx7 := cpuid(7, 0)
eax7, ebx7, ecx7, edx7 := cpuid(7, 0)
X86.HasBMI1 = isSet(3, ebx7)
X86.HasAVX2 = isSet(5, ebx7) && osSupportsAVX
X86.HasBMI2 = isSet(8, ebx7)
@ -134,14 +137,24 @@ func archInit() {
X86.HasAVX512VAES = isSet(9, ecx7)
X86.HasAVX512VBMI2 = isSet(6, ecx7)
X86.HasAVX512BITALG = isSet(12, ecx7)
eax71, _, _, _ := cpuid(7, 1)
X86.HasAVX512BF16 = isSet(5, eax71)
}
X86.HasAMXTile = isSet(24, edx7)
X86.HasAMXInt8 = isSet(25, edx7)
X86.HasAMXBF16 = isSet(22, edx7)
// These features depend on the second level of extended features.
if eax7 >= 1 {
eax71, _, _, edx71 := cpuid(7, 1)
if X86.HasAVX512 {
X86.HasAVX512BF16 = isSet(5, eax71)
}
if X86.HasAVX {
X86.HasAVXIFMA = isSet(23, eax71)
X86.HasAVXVNNI = isSet(4, eax71)
X86.HasAVXVNNIInt8 = isSet(4, edx71)
}
}
}
func isSet(bitpos uint, value uint32) bool {

vendor/golang.org/x/sys/unix/auxv.go generated vendored Normal file

@ -0,0 +1,36 @@
// Copyright 2025 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build go1.21 && (aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos)
package unix
import (
"syscall"
"unsafe"
)
//go:linkname runtime_getAuxv runtime.getAuxv
func runtime_getAuxv() []uintptr
// Auxv returns the ELF auxiliary vector as a sequence of key/value pairs.
// The returned slice is always a fresh copy, owned by the caller.
// It returns an error on non-ELF platforms, or if the auxiliary vector cannot be accessed,
// which happens in some locked-down environments and build modes.
func Auxv() ([][2]uintptr, error) {
vec := runtime_getAuxv()
vecLen := len(vec)
if vecLen == 0 {
return nil, syscall.ENOENT
}
if vecLen%2 != 0 {
return nil, syscall.EINVAL
}
result := make([]uintptr, vecLen)
copy(result, vec)
return unsafe.Slice((*[2]uintptr)(unsafe.Pointer(&result[0])), vecLen/2), nil
}
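A usage sketch for the new accessor; AT_PAGESZ (6) is the standard ELF auxv tag for the page size, declared locally here rather than assuming the package exports it:

package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	pairs, err := unix.Auxv()
	if err != nil {
		fmt.Println("auxv unavailable:", err)
		return
	}
	const AT_PAGESZ = 6
	for _, kv := range pairs { // each entry is a [2]uintptr key/value pair
		if kv[0] == AT_PAGESZ {
			fmt.Println("page size:", kv[1])
		}
	}
}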

vendor/golang.org/x/sys/unix/auxv_unsupported.go generated vendored Normal file

@ -0,0 +1,13 @@
// Copyright 2025 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build !go1.21 && (aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos)
package unix
import "syscall"
func Auxv() ([][2]uintptr, error) {
return nil, syscall.ENOTSUP
}


@ -1102,3 +1102,90 @@ func (s *Strioctl) SetInt(i int) {
func IoctlSetStrioctlRetInt(fd int, req int, s *Strioctl) (int, error) {
return ioctlPtrRet(fd, req, unsafe.Pointer(s))
}
// Ucred Helpers
// See ucred(3c) and getpeerucred(3c)
//sys getpeerucred(fd uintptr, ucred *uintptr) (err error)
//sys ucredFree(ucred uintptr) = ucred_free
//sys ucredGet(pid int) (ucred uintptr, err error) = ucred_get
//sys ucredGeteuid(ucred uintptr) (uid int) = ucred_geteuid
//sys ucredGetegid(ucred uintptr) (gid int) = ucred_getegid
//sys ucredGetruid(ucred uintptr) (uid int) = ucred_getruid
//sys ucredGetrgid(ucred uintptr) (gid int) = ucred_getrgid
//sys ucredGetsuid(ucred uintptr) (uid int) = ucred_getsuid
//sys ucredGetsgid(ucred uintptr) (gid int) = ucred_getsgid
//sys ucredGetpid(ucred uintptr) (pid int) = ucred_getpid
// Ucred is an opaque struct that holds user credentials.
type Ucred struct {
ucred uintptr
}
// We need to ensure that ucredFree is called on the underlying ucred
// when the Ucred is garbage collected.
func ucredFinalizer(u *Ucred) {
ucredFree(u.ucred)
}
func GetPeerUcred(fd uintptr) (*Ucred, error) {
var ucred uintptr
err := getpeerucred(fd, &ucred)
if err != nil {
return nil, err
}
result := &Ucred{
ucred: ucred,
}
// set the finalizer on the result so that the ucred will be freed
runtime.SetFinalizer(result, ucredFinalizer)
return result, nil
}
func UcredGet(pid int) (*Ucred, error) {
ucred, err := ucredGet(pid)
if err != nil {
return nil, err
}
result := &Ucred{
ucred: ucred,
}
// set the finalizer on the result so that the ucred will be freed
runtime.SetFinalizer(result, ucredFinalizer)
return result, nil
}
func (u *Ucred) Geteuid() int {
defer runtime.KeepAlive(u)
return ucredGeteuid(u.ucred)
}
func (u *Ucred) Getruid() int {
defer runtime.KeepAlive(u)
return ucredGetruid(u.ucred)
}
func (u *Ucred) Getsuid() int {
defer runtime.KeepAlive(u)
return ucredGetsuid(u.ucred)
}
func (u *Ucred) Getegid() int {
defer runtime.KeepAlive(u)
return ucredGetegid(u.ucred)
}
func (u *Ucred) Getrgid() int {
defer runtime.KeepAlive(u)
return ucredGetrgid(u.ucred)
}
func (u *Ucred) Getsgid() int {
defer runtime.KeepAlive(u)
return ucredGetsgid(u.ucred)
}
func (u *Ucred) Getpid() int {
defer runtime.KeepAlive(u)
return ucredGetpid(u.ucred)
}
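A hedged usage sketch, compiling only on Solaris/illumos (hence the build tag); the fd is a placeholder for a connected UNIX-domain socket:

//go:build solaris

package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func peerIDs(fd uintptr) error {
	uc, err := unix.GetPeerUcred(fd)
	if err != nil {
		return err
	}
	// No explicit free: the finalizer releases the underlying ucred_t.
	fmt.Println("euid:", uc.Geteuid(), "egid:", uc.Getegid(), "pid:", uc.Getpid())
	return nil
}

func main() {
	if err := peerIDs(0); err != nil { // fd 0 as a stand-in
		fmt.Println(err)
	}
}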


@ -1245,6 +1245,7 @@ const (
FAN_REPORT_DFID_NAME = 0xc00
FAN_REPORT_DFID_NAME_TARGET = 0x1e00
FAN_REPORT_DIR_FID = 0x400
FAN_REPORT_FD_ERROR = 0x2000
FAN_REPORT_FID = 0x200
FAN_REPORT_NAME = 0x800
FAN_REPORT_PIDFD = 0x80
@ -1330,8 +1331,10 @@ const (
FUSE_SUPER_MAGIC = 0x65735546
FUTEXFS_SUPER_MAGIC = 0xbad1dea
F_ADD_SEALS = 0x409
F_CREATED_QUERY = 0x404
F_DUPFD = 0x0
F_DUPFD_CLOEXEC = 0x406
F_DUPFD_QUERY = 0x403
F_EXLCK = 0x4
F_GETFD = 0x1
F_GETFL = 0x3
@ -1551,6 +1554,7 @@ const (
IPPROTO_ROUTING = 0x2b
IPPROTO_RSVP = 0x2e
IPPROTO_SCTP = 0x84
IPPROTO_SMC = 0x100
IPPROTO_TCP = 0x6
IPPROTO_TP = 0x1d
IPPROTO_UDP = 0x11
@ -1623,6 +1627,8 @@ const (
IPV6_UNICAST_IF = 0x4c
IPV6_USER_FLOW = 0xe
IPV6_V6ONLY = 0x1a
IPV6_VERSION = 0x60
IPV6_VERSION_MASK = 0xf0
IPV6_XFRM_POLICY = 0x23
IP_ADD_MEMBERSHIP = 0x23
IP_ADD_SOURCE_MEMBERSHIP = 0x27
@ -1867,6 +1873,7 @@ const (
MADV_UNMERGEABLE = 0xd
MADV_WILLNEED = 0x3
MADV_WIPEONFORK = 0x12
MAP_DROPPABLE = 0x8
MAP_FILE = 0x0
MAP_FIXED = 0x10
MAP_FIXED_NOREPLACE = 0x100000
@ -1967,6 +1974,7 @@ const (
MSG_PEEK = 0x2
MSG_PROXY = 0x10
MSG_RST = 0x1000
MSG_SOCK_DEVMEM = 0x2000000
MSG_SYN = 0x400
MSG_TRUNC = 0x20
MSG_TRYHARD = 0x4
@ -2083,6 +2091,7 @@ const (
NFC_ATR_REQ_MAXSIZE = 0x40
NFC_ATR_RES_GB_MAXSIZE = 0x2f
NFC_ATR_RES_MAXSIZE = 0x40
NFC_ATS_MAXSIZE = 0x14
NFC_COMM_ACTIVE = 0x0
NFC_COMM_PASSIVE = 0x1
NFC_DEVICE_NAME_MAXSIZE = 0x8
@ -2163,6 +2172,7 @@ const (
NFNL_SUBSYS_QUEUE = 0x3
NFNL_SUBSYS_ULOG = 0x4
NFS_SUPER_MAGIC = 0x6969
NFT_BITWISE_BOOL = 0x0
NFT_CHAIN_FLAGS = 0x7
NFT_CHAIN_MAXNAMELEN = 0x100
NFT_CT_MAX = 0x17
@ -2491,6 +2501,7 @@ const (
PR_GET_PDEATHSIG = 0x2
PR_GET_SECCOMP = 0x15
PR_GET_SECUREBITS = 0x1b
PR_GET_SHADOW_STACK_STATUS = 0x4a
PR_GET_SPECULATION_CTRL = 0x34
PR_GET_TAGGED_ADDR_CTRL = 0x38
PR_GET_THP_DISABLE = 0x2a
@ -2499,6 +2510,7 @@ const (
PR_GET_TIMING = 0xd
PR_GET_TSC = 0x19
PR_GET_UNALIGN = 0x5
PR_LOCK_SHADOW_STACK_STATUS = 0x4c
PR_MCE_KILL = 0x21
PR_MCE_KILL_CLEAR = 0x0
PR_MCE_KILL_DEFAULT = 0x2
@ -2525,6 +2537,8 @@ const (
PR_PAC_GET_ENABLED_KEYS = 0x3d
PR_PAC_RESET_KEYS = 0x36
PR_PAC_SET_ENABLED_KEYS = 0x3c
PR_PMLEN_MASK = 0x7f000000
PR_PMLEN_SHIFT = 0x18
PR_PPC_DEXCR_CTRL_CLEAR = 0x4
PR_PPC_DEXCR_CTRL_CLEAR_ONEXEC = 0x10
PR_PPC_DEXCR_CTRL_EDITABLE = 0x1
@ -2592,6 +2606,7 @@ const (
PR_SET_PTRACER = 0x59616d61
PR_SET_SECCOMP = 0x16
PR_SET_SECUREBITS = 0x1c
PR_SET_SHADOW_STACK_STATUS = 0x4b
PR_SET_SPECULATION_CTRL = 0x35
PR_SET_SYSCALL_USER_DISPATCH = 0x3b
PR_SET_TAGGED_ADDR_CTRL = 0x37
@ -2602,6 +2617,9 @@ const (
PR_SET_UNALIGN = 0x6
PR_SET_VMA = 0x53564d41
PR_SET_VMA_ANON_NAME = 0x0
PR_SHADOW_STACK_ENABLE = 0x1
PR_SHADOW_STACK_PUSH = 0x4
PR_SHADOW_STACK_WRITE = 0x2
PR_SME_GET_VL = 0x40
PR_SME_SET_VL = 0x3f
PR_SME_SET_VL_ONEXEC = 0x40000
@ -2911,7 +2929,6 @@ const (
RTM_NEWNEXTHOP = 0x68
RTM_NEWNEXTHOPBUCKET = 0x74
RTM_NEWNSID = 0x58
RTM_NEWNVLAN = 0x70
RTM_NEWPREFIX = 0x34
RTM_NEWQDISC = 0x24
RTM_NEWROUTE = 0x18
@ -2920,6 +2937,7 @@ const (
RTM_NEWTCLASS = 0x28
RTM_NEWTFILTER = 0x2c
RTM_NEWTUNNEL = 0x78
RTM_NEWVLAN = 0x70
RTM_NR_FAMILIES = 0x1b
RTM_NR_MSGTYPES = 0x6c
RTM_SETDCB = 0x4f


@ -116,6 +116,8 @@ const (
IN_CLOEXEC = 0x80000
IN_NONBLOCK = 0x800
IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x7b9
IPV6_FLOWINFO_MASK = 0xffffff0f
IPV6_FLOWLABEL_MASK = 0xffff0f00
ISIG = 0x1
IUCLC = 0x200
IXOFF = 0x1000
@ -304,6 +306,7 @@ const (
SCM_TIMESTAMPING_OPT_STATS = 0x36
SCM_TIMESTAMPING_PKTINFO = 0x3a
SCM_TIMESTAMPNS = 0x23
SCM_TS_OPT_ID = 0x51
SCM_TXTIME = 0x3d
SCM_WIFI_STATUS = 0x29
SECCOMP_IOCTL_NOTIF_ADDFD = 0x40182103


@ -116,6 +116,8 @@ const (
IN_CLOEXEC = 0x80000
IN_NONBLOCK = 0x800
IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x7b9
IPV6_FLOWINFO_MASK = 0xffffff0f
IPV6_FLOWLABEL_MASK = 0xffff0f00
ISIG = 0x1
IUCLC = 0x200
IXOFF = 0x1000
@ -305,6 +307,7 @@ const (
SCM_TIMESTAMPING_OPT_STATS = 0x36
SCM_TIMESTAMPING_PKTINFO = 0x3a
SCM_TIMESTAMPNS = 0x23
SCM_TS_OPT_ID = 0x51
SCM_TXTIME = 0x3d
SCM_WIFI_STATUS = 0x29
SECCOMP_IOCTL_NOTIF_ADDFD = 0x40182103


@ -115,6 +115,8 @@ const (
IN_CLOEXEC = 0x80000
IN_NONBLOCK = 0x800
IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x7b9
IPV6_FLOWINFO_MASK = 0xffffff0f
IPV6_FLOWLABEL_MASK = 0xffff0f00
ISIG = 0x1
IUCLC = 0x200
IXOFF = 0x1000
@ -310,6 +312,7 @@ const (
SCM_TIMESTAMPING_OPT_STATS = 0x36
SCM_TIMESTAMPING_PKTINFO = 0x3a
SCM_TIMESTAMPNS = 0x23
SCM_TS_OPT_ID = 0x51
SCM_TXTIME = 0x3d
SCM_WIFI_STATUS = 0x29
SECCOMP_IOCTL_NOTIF_ADDFD = 0x40182103


@ -109,6 +109,7 @@ const (
F_SETOWN = 0x8
F_UNLCK = 0x2
F_WRLCK = 0x1
GCS_MAGIC = 0x47435300
HIDIOCGRAWINFO = 0x80084803
HIDIOCGRDESC = 0x90044802
HIDIOCGRDESCSIZE = 0x80044801
@ -119,6 +120,8 @@ const (
IN_CLOEXEC = 0x80000
IN_NONBLOCK = 0x800
IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x7b9
IPV6_FLOWINFO_MASK = 0xffffff0f
IPV6_FLOWLABEL_MASK = 0xffff0f00
ISIG = 0x1
IUCLC = 0x200
IXOFF = 0x1000
@ -302,6 +305,7 @@ const (
SCM_TIMESTAMPING_OPT_STATS = 0x36
SCM_TIMESTAMPING_PKTINFO = 0x3a
SCM_TIMESTAMPNS = 0x23
SCM_TS_OPT_ID = 0x51
SCM_TXTIME = 0x3d
SCM_WIFI_STATUS = 0x29
SECCOMP_IOCTL_NOTIF_ADDFD = 0x40182103


@ -116,6 +116,8 @@ const (
IN_CLOEXEC = 0x80000
IN_NONBLOCK = 0x800
IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x7b9
IPV6_FLOWINFO_MASK = 0xffffff0f
IPV6_FLOWLABEL_MASK = 0xffff0f00
ISIG = 0x1
IUCLC = 0x200
IXOFF = 0x1000
@ -297,6 +299,7 @@ const (
SCM_TIMESTAMPING_OPT_STATS = 0x36
SCM_TIMESTAMPING_PKTINFO = 0x3a
SCM_TIMESTAMPNS = 0x23
SCM_TS_OPT_ID = 0x51
SCM_TXTIME = 0x3d
SCM_WIFI_STATUS = 0x29
SECCOMP_IOCTL_NOTIF_ADDFD = 0x40182103


@ -115,6 +115,8 @@ const (
IN_CLOEXEC = 0x80000
IN_NONBLOCK = 0x80
IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9
IPV6_FLOWINFO_MASK = 0xfffffff
IPV6_FLOWLABEL_MASK = 0xfffff
ISIG = 0x1
IUCLC = 0x200
IXOFF = 0x1000
@ -303,6 +305,7 @@ const (
SCM_TIMESTAMPING_OPT_STATS = 0x36
SCM_TIMESTAMPING_PKTINFO = 0x3a
SCM_TIMESTAMPNS = 0x23
SCM_TS_OPT_ID = 0x51
SCM_TXTIME = 0x3d
SCM_WIFI_STATUS = 0x29
SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103


@ -115,6 +115,8 @@ const (
IN_CLOEXEC = 0x80000
IN_NONBLOCK = 0x80
IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9
IPV6_FLOWINFO_MASK = 0xfffffff
IPV6_FLOWLABEL_MASK = 0xfffff
ISIG = 0x1
IUCLC = 0x200
IXOFF = 0x1000
@ -303,6 +305,7 @@ const (
SCM_TIMESTAMPING_OPT_STATS = 0x36
SCM_TIMESTAMPING_PKTINFO = 0x3a
SCM_TIMESTAMPNS = 0x23
SCM_TS_OPT_ID = 0x51
SCM_TXTIME = 0x3d
SCM_WIFI_STATUS = 0x29
SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103


@ -115,6 +115,8 @@ const (
IN_CLOEXEC = 0x80000
IN_NONBLOCK = 0x80
IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9
IPV6_FLOWINFO_MASK = 0xffffff0f
IPV6_FLOWLABEL_MASK = 0xffff0f00
ISIG = 0x1
IUCLC = 0x200
IXOFF = 0x1000
@ -303,6 +305,7 @@ const (
SCM_TIMESTAMPING_OPT_STATS = 0x36
SCM_TIMESTAMPING_PKTINFO = 0x3a
SCM_TIMESTAMPNS = 0x23
SCM_TS_OPT_ID = 0x51
SCM_TXTIME = 0x3d
SCM_WIFI_STATUS = 0x29
SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103


@ -115,6 +115,8 @@ const (
IN_CLOEXEC = 0x80000
IN_NONBLOCK = 0x80
IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9
IPV6_FLOWINFO_MASK = 0xffffff0f
IPV6_FLOWLABEL_MASK = 0xffff0f00
ISIG = 0x1
IUCLC = 0x200
IXOFF = 0x1000
@ -303,6 +305,7 @@ const (
SCM_TIMESTAMPING_OPT_STATS = 0x36
SCM_TIMESTAMPING_PKTINFO = 0x3a
SCM_TIMESTAMPNS = 0x23
SCM_TS_OPT_ID = 0x51
SCM_TXTIME = 0x3d
SCM_WIFI_STATUS = 0x29
SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103


@ -115,6 +115,8 @@ const (
IN_CLOEXEC = 0x80000
IN_NONBLOCK = 0x800
IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9
IPV6_FLOWINFO_MASK = 0xfffffff
IPV6_FLOWLABEL_MASK = 0xfffff
ISIG = 0x80
IUCLC = 0x1000
IXOFF = 0x400
@ -358,6 +360,7 @@ const (
SCM_TIMESTAMPING_OPT_STATS = 0x36
SCM_TIMESTAMPING_PKTINFO = 0x3a
SCM_TIMESTAMPNS = 0x23
SCM_TS_OPT_ID = 0x51
SCM_TXTIME = 0x3d
SCM_WIFI_STATUS = 0x29
SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103


@ -115,6 +115,8 @@ const (
IN_CLOEXEC = 0x80000
IN_NONBLOCK = 0x800
IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9
IPV6_FLOWINFO_MASK = 0xfffffff
IPV6_FLOWLABEL_MASK = 0xfffff
ISIG = 0x80
IUCLC = 0x1000
IXOFF = 0x400
@ -362,6 +364,7 @@ const (
SCM_TIMESTAMPING_OPT_STATS = 0x36
SCM_TIMESTAMPING_PKTINFO = 0x3a
SCM_TIMESTAMPNS = 0x23
SCM_TS_OPT_ID = 0x51
SCM_TXTIME = 0x3d
SCM_WIFI_STATUS = 0x29
SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103


@ -115,6 +115,8 @@ const (
IN_CLOEXEC = 0x80000
IN_NONBLOCK = 0x800
IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9
IPV6_FLOWINFO_MASK = 0xffffff0f
IPV6_FLOWLABEL_MASK = 0xffff0f00
ISIG = 0x80
IUCLC = 0x1000
IXOFF = 0x400
@ -362,6 +364,7 @@ const (
SCM_TIMESTAMPING_OPT_STATS = 0x36
SCM_TIMESTAMPING_PKTINFO = 0x3a
SCM_TIMESTAMPNS = 0x23
SCM_TS_OPT_ID = 0x51
SCM_TXTIME = 0x3d
SCM_WIFI_STATUS = 0x29
SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103


@ -115,6 +115,8 @@ const (
IN_CLOEXEC = 0x80000
IN_NONBLOCK = 0x800
IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x7b9
IPV6_FLOWINFO_MASK = 0xffffff0f
IPV6_FLOWLABEL_MASK = 0xffff0f00
ISIG = 0x1
IUCLC = 0x200
IXOFF = 0x1000
@ -294,6 +296,7 @@ const (
SCM_TIMESTAMPING_OPT_STATS = 0x36
SCM_TIMESTAMPING_PKTINFO = 0x3a
SCM_TIMESTAMPNS = 0x23
SCM_TS_OPT_ID = 0x51
SCM_TXTIME = 0x3d
SCM_WIFI_STATUS = 0x29
SECCOMP_IOCTL_NOTIF_ADDFD = 0x40182103


@ -115,6 +115,8 @@ const (
IN_CLOEXEC = 0x80000
IN_NONBLOCK = 0x800
IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x7b9
IPV6_FLOWINFO_MASK = 0xfffffff
IPV6_FLOWLABEL_MASK = 0xfffff
ISIG = 0x1
IUCLC = 0x200
IXOFF = 0x1000
@ -366,6 +368,7 @@ const (
SCM_TIMESTAMPING_OPT_STATS = 0x36
SCM_TIMESTAMPING_PKTINFO = 0x3a
SCM_TIMESTAMPNS = 0x23
SCM_TS_OPT_ID = 0x51
SCM_TXTIME = 0x3d
SCM_WIFI_STATUS = 0x29
SECCOMP_IOCTL_NOTIF_ADDFD = 0x40182103


@ -119,6 +119,8 @@ const (
IN_CLOEXEC = 0x400000
IN_NONBLOCK = 0x4000
IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9
IPV6_FLOWINFO_MASK = 0xfffffff
IPV6_FLOWLABEL_MASK = 0xfffff
ISIG = 0x1
IUCLC = 0x200
IXOFF = 0x1000
@ -357,6 +359,7 @@ const (
SCM_TIMESTAMPING_OPT_STATS = 0x38
SCM_TIMESTAMPING_PKTINFO = 0x3c
SCM_TIMESTAMPNS = 0x21
SCM_TS_OPT_ID = 0x5a
SCM_TXTIME = 0x3f
SCM_WIFI_STATUS = 0x25
SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103


@ -141,6 +141,16 @@ import (
//go:cgo_import_dynamic libc_getpeername getpeername "libsocket.so"
//go:cgo_import_dynamic libc_setsockopt setsockopt "libsocket.so"
//go:cgo_import_dynamic libc_recvfrom recvfrom "libsocket.so"
//go:cgo_import_dynamic libc_getpeerucred getpeerucred "libc.so"
//go:cgo_import_dynamic libc_ucred_get ucred_get "libc.so"
//go:cgo_import_dynamic libc_ucred_geteuid ucred_geteuid "libc.so"
//go:cgo_import_dynamic libc_ucred_getegid ucred_getegid "libc.so"
//go:cgo_import_dynamic libc_ucred_getruid ucred_getruid "libc.so"
//go:cgo_import_dynamic libc_ucred_getrgid ucred_getrgid "libc.so"
//go:cgo_import_dynamic libc_ucred_getsuid ucred_getsuid "libc.so"
//go:cgo_import_dynamic libc_ucred_getsgid ucred_getsgid "libc.so"
//go:cgo_import_dynamic libc_ucred_getpid ucred_getpid "libc.so"
//go:cgo_import_dynamic libc_ucred_free ucred_free "libc.so"
//go:cgo_import_dynamic libc_port_create port_create "libc.so"
//go:cgo_import_dynamic libc_port_associate port_associate "libc.so"
//go:cgo_import_dynamic libc_port_dissociate port_dissociate "libc.so"
@ -280,6 +290,16 @@ import (
//go:linkname procgetpeername libc_getpeername
//go:linkname procsetsockopt libc_setsockopt
//go:linkname procrecvfrom libc_recvfrom
//go:linkname procgetpeerucred libc_getpeerucred
//go:linkname procucred_get libc_ucred_get
//go:linkname procucred_geteuid libc_ucred_geteuid
//go:linkname procucred_getegid libc_ucred_getegid
//go:linkname procucred_getruid libc_ucred_getruid
//go:linkname procucred_getrgid libc_ucred_getrgid
//go:linkname procucred_getsuid libc_ucred_getsuid
//go:linkname procucred_getsgid libc_ucred_getsgid
//go:linkname procucred_getpid libc_ucred_getpid
//go:linkname procucred_free libc_ucred_free
//go:linkname procport_create libc_port_create
//go:linkname procport_associate libc_port_associate
//go:linkname procport_dissociate libc_port_dissociate
@ -420,6 +440,16 @@ var (
procgetpeername,
procsetsockopt,
procrecvfrom,
procgetpeerucred,
procucred_get,
procucred_geteuid,
procucred_getegid,
procucred_getruid,
procucred_getrgid,
procucred_getsuid,
procucred_getsgid,
procucred_getpid,
procucred_free,
procport_create,
procport_associate,
procport_dissociate,
@ -2029,6 +2059,90 @@ func recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Sockl
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func getpeerucred(fd uintptr, ucred *uintptr) (err error) {
_, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procgetpeerucred)), 2, uintptr(fd), uintptr(unsafe.Pointer(ucred)), 0, 0, 0, 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func ucredGet(pid int) (ucred uintptr, err error) {
r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procucred_get)), 1, uintptr(pid), 0, 0, 0, 0, 0)
ucred = uintptr(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func ucredGeteuid(ucred uintptr) (uid int) {
r0, _, _ := sysvicall6(uintptr(unsafe.Pointer(&procucred_geteuid)), 1, uintptr(ucred), 0, 0, 0, 0, 0)
uid = int(r0)
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func ucredGetegid(ucred uintptr) (gid int) {
r0, _, _ := sysvicall6(uintptr(unsafe.Pointer(&procucred_getegid)), 1, uintptr(ucred), 0, 0, 0, 0, 0)
gid = int(r0)
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func ucredGetruid(ucred uintptr) (uid int) {
r0, _, _ := sysvicall6(uintptr(unsafe.Pointer(&procucred_getruid)), 1, uintptr(ucred), 0, 0, 0, 0, 0)
uid = int(r0)
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func ucredGetrgid(ucred uintptr) (gid int) {
r0, _, _ := sysvicall6(uintptr(unsafe.Pointer(&procucred_getrgid)), 1, uintptr(ucred), 0, 0, 0, 0, 0)
gid = int(r0)
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func ucredGetsuid(ucred uintptr) (uid int) {
r0, _, _ := sysvicall6(uintptr(unsafe.Pointer(&procucred_getsuid)), 1, uintptr(ucred), 0, 0, 0, 0, 0)
uid = int(r0)
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func ucredGetsgid(ucred uintptr) (gid int) {
r0, _, _ := sysvicall6(uintptr(unsafe.Pointer(&procucred_getsgid)), 1, uintptr(ucred), 0, 0, 0, 0, 0)
gid = int(r0)
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func ucredGetpid(ucred uintptr) (pid int) {
r0, _, _ := sysvicall6(uintptr(unsafe.Pointer(&procucred_getpid)), 1, uintptr(ucred), 0, 0, 0, 0, 0)
pid = int(r0)
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func ucredFree(ucred uintptr) {
sysvicall6(uintptr(unsafe.Pointer(&procucred_free)), 1, uintptr(ucred), 0, 0, 0, 0, 0)
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func port_create() (n int, err error) {
r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procport_create)), 0, 0, 0, 0, 0, 0, 0)
n = int(r0)


@ -458,4 +458,8 @@ const (
SYS_LSM_SET_SELF_ATTR = 460
SYS_LSM_LIST_MODULES = 461
SYS_MSEAL = 462
SYS_SETXATTRAT = 463
SYS_GETXATTRAT = 464
SYS_LISTXATTRAT = 465
SYS_REMOVEXATTRAT = 466
)


@ -381,4 +381,8 @@ const (
SYS_LSM_SET_SELF_ATTR = 460
SYS_LSM_LIST_MODULES = 461
SYS_MSEAL = 462
SYS_SETXATTRAT = 463
SYS_GETXATTRAT = 464
SYS_LISTXATTRAT = 465
SYS_REMOVEXATTRAT = 466
)


@ -422,4 +422,8 @@ const (
SYS_LSM_SET_SELF_ATTR = 460
SYS_LSM_LIST_MODULES = 461
SYS_MSEAL = 462
SYS_SETXATTRAT = 463
SYS_GETXATTRAT = 464
SYS_LISTXATTRAT = 465
SYS_REMOVEXATTRAT = 466
)


@ -325,4 +325,8 @@ const (
SYS_LSM_SET_SELF_ATTR = 460
SYS_LSM_LIST_MODULES = 461
SYS_MSEAL = 462
SYS_SETXATTRAT = 463
SYS_GETXATTRAT = 464
SYS_LISTXATTRAT = 465
SYS_REMOVEXATTRAT = 466
)


@ -321,4 +321,8 @@ const (
SYS_LSM_SET_SELF_ATTR = 460
SYS_LSM_LIST_MODULES = 461
SYS_MSEAL = 462
SYS_SETXATTRAT = 463
SYS_GETXATTRAT = 464
SYS_LISTXATTRAT = 465
SYS_REMOVEXATTRAT = 466
)


@ -442,4 +442,8 @@ const (
SYS_LSM_SET_SELF_ATTR = 4460
SYS_LSM_LIST_MODULES = 4461
SYS_MSEAL = 4462
SYS_SETXATTRAT = 4463
SYS_GETXATTRAT = 4464
SYS_LISTXATTRAT = 4465
SYS_REMOVEXATTRAT = 4466
)


@ -372,4 +372,8 @@ const (
SYS_LSM_SET_SELF_ATTR = 5460
SYS_LSM_LIST_MODULES = 5461
SYS_MSEAL = 5462
SYS_SETXATTRAT = 5463
SYS_GETXATTRAT = 5464
SYS_LISTXATTRAT = 5465
SYS_REMOVEXATTRAT = 5466
)


@ -372,4 +372,8 @@ const (
SYS_LSM_SET_SELF_ATTR = 5460
SYS_LSM_LIST_MODULES = 5461
SYS_MSEAL = 5462
SYS_SETXATTRAT = 5463
SYS_GETXATTRAT = 5464
SYS_LISTXATTRAT = 5465
SYS_REMOVEXATTRAT = 5466
)


@ -442,4 +442,8 @@ const (
SYS_LSM_SET_SELF_ATTR = 4460
SYS_LSM_LIST_MODULES = 4461
SYS_MSEAL = 4462
SYS_SETXATTRAT = 4463
SYS_GETXATTRAT = 4464
SYS_LISTXATTRAT = 4465
SYS_REMOVEXATTRAT = 4466
)


@ -449,4 +449,8 @@ const (
SYS_LSM_SET_SELF_ATTR = 460
SYS_LSM_LIST_MODULES = 461
SYS_MSEAL = 462
SYS_SETXATTRAT = 463
SYS_GETXATTRAT = 464
SYS_LISTXATTRAT = 465
SYS_REMOVEXATTRAT = 466
)


@ -421,4 +421,8 @@ const (
SYS_LSM_SET_SELF_ATTR = 460
SYS_LSM_LIST_MODULES = 461
SYS_MSEAL = 462
SYS_SETXATTRAT = 463
SYS_GETXATTRAT = 464
SYS_LISTXATTRAT = 465
SYS_REMOVEXATTRAT = 466
)


@ -421,4 +421,8 @@ const (
SYS_LSM_SET_SELF_ATTR = 460
SYS_LSM_LIST_MODULES = 461
SYS_MSEAL = 462
SYS_SETXATTRAT = 463
SYS_GETXATTRAT = 464
SYS_LISTXATTRAT = 465
SYS_REMOVEXATTRAT = 466
)


@ -326,4 +326,8 @@ const (
SYS_LSM_SET_SELF_ATTR = 460
SYS_LSM_LIST_MODULES = 461
SYS_MSEAL = 462
SYS_SETXATTRAT = 463
SYS_GETXATTRAT = 464
SYS_LISTXATTRAT = 465
SYS_REMOVEXATTRAT = 466
)


@ -387,4 +387,8 @@ const (
SYS_LSM_SET_SELF_ATTR = 460
SYS_LSM_LIST_MODULES = 461
SYS_MSEAL = 462
SYS_SETXATTRAT = 463
SYS_GETXATTRAT = 464
SYS_LISTXATTRAT = 465
SYS_REMOVEXATTRAT = 466
)


@ -400,4 +400,8 @@ const (
SYS_LSM_SET_SELF_ATTR = 460
SYS_LSM_LIST_MODULES = 461
SYS_MSEAL = 462
SYS_SETXATTRAT = 463
SYS_GETXATTRAT = 464
SYS_LISTXATTRAT = 465
SYS_REMOVEXATTRAT = 466
)


@ -4747,7 +4747,7 @@ const (
NL80211_ATTR_MAC_HINT = 0xc8
NL80211_ATTR_MAC_MASK = 0xd7
NL80211_ATTR_MAX_AP_ASSOC_STA = 0xca
NL80211_ATTR_MAX = 0x14c
NL80211_ATTR_MAX = 0x14d
NL80211_ATTR_MAX_CRIT_PROT_DURATION = 0xb4
NL80211_ATTR_MAX_CSA_COUNTERS = 0xce
NL80211_ATTR_MAX_MATCH_SETS = 0x85
@ -5519,7 +5519,7 @@ const (
NL80211_MNTR_FLAG_CONTROL = 0x3
NL80211_MNTR_FLAG_COOK_FRAMES = 0x5
NL80211_MNTR_FLAG_FCSFAIL = 0x1
NL80211_MNTR_FLAG_MAX = 0x6
NL80211_MNTR_FLAG_MAX = 0x7
NL80211_MNTR_FLAG_OTHER_BSS = 0x4
NL80211_MNTR_FLAG_PLCPFAIL = 0x2
NL80211_MPATH_FLAG_ACTIVE = 0x1
@ -6174,3 +6174,5 @@ type SockDiagReq struct {
Family uint8
Protocol uint8
}
const RTM_NEWNVLAN = 0x70


@ -405,8 +405,15 @@ func (limit Limit) durationFromTokens(tokens float64) time.Duration {
if limit <= 0 {
return InfDuration
}
seconds := tokens / float64(limit)
return time.Duration(float64(time.Second) * seconds)
duration := (tokens / float64(limit)) * float64(time.Second)
// Cap the duration to the maximum representable int64 value, to avoid overflow.
if duration > float64(math.MaxInt64) {
return InfDuration
}
return time.Duration(duration)
}
// tokensFromDuration is a unit conversion function from a time duration to the number of tokens
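The cap prevents converting a float64 above MaxInt64 into time.Duration, which would wrap. A sketch of the fixed conversion, modeling InfDuration as time.Duration(math.MaxInt64) to match the package:

package main

import (
	"fmt"
	"math"
	"time"
)

func durationFromTokens(limit, tokens float64) time.Duration {
	if limit <= 0 {
		return time.Duration(math.MaxInt64) // InfDuration
	}
	d := (tokens / limit) * float64(time.Second)
	if d > float64(math.MaxInt64) {
		return time.Duration(math.MaxInt64) // cap instead of overflowing
	}
	return time.Duration(d)
}

func main() {
	fmt.Println(durationFromTokens(1, 2))       // 2s
	fmt.Println(durationFromTokens(1e-9, 1e10)) // capped at 2562047h47m16.854775807s
}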


@ -344,7 +344,12 @@ func RewriteImport(fset *token.FileSet, f *ast.File, oldPath, newPath string) (r
}
// UsesImport reports whether a given import is used.
// The provided File must have been parsed with syntactic object resolution
// (not using go/parser.SkipObjectResolution).
func UsesImport(f *ast.File, path string) (used bool) {
if f.Scope == nil {
panic("file f was not parsed with syntactic object resolution")
}
spec := importSpec(f, path)
if spec == nil {
return
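The new precondition means callers must not parse with parser.SkipObjectResolution; a minimal sketch of correct use:

package main

import (
	"fmt"
	"go/parser"
	"go/token"

	"golang.org/x/tools/go/ast/astutil"
)

func main() {
	src := `package p

import "fmt"

var _ = fmt.Sprint
`
	fset := token.NewFileSet()
	// Mode 0 keeps syntactic object resolution, so f.Scope is non-nil.
	f, err := parser.ParseFile(fset, "p.go", src, 0)
	if err != nil {
		panic(err)
	}
	fmt.Println(astutil.UsesImport(f, "fmt")) // true
}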


@ -28,7 +28,7 @@ import (
"golang.org/x/tools/internal/event/label"
)
// An Runner will run go command invocations and serialize
// A Runner will run go command invocations and serialize
// them if it sees a concurrency error.
type Runner struct {
// once guards the runner initialization.
@ -179,7 +179,7 @@ type Invocation struct {
CleanEnv bool
Env []string
WorkingDir string
Logf func(format string, args ...interface{})
Logf func(format string, args ...any)
}
// Postcondition: both error results have same nilness.
@ -388,7 +388,9 @@ func runCmdContext(ctx context.Context, cmd *exec.Cmd) (err error) {
case err := <-resChan:
return err
case <-timer.C:
HandleHangingGoCommand(startTime, cmd)
// HandleHangingGoCommand terminates this process.
// Pass off resChan in case we can collect the command error.
handleHangingGoCommand(startTime, cmd, resChan)
case <-ctx.Done():
}
} else {
@ -413,8 +415,6 @@ func runCmdContext(ctx context.Context, cmd *exec.Cmd) (err error) {
}
// Didn't shut down in response to interrupt. Kill it hard.
// TODO(rfindley): per advice from bcmills@, it may be better to send SIGQUIT
// on certain platforms, such as unix.
if err := cmd.Process.Kill(); err != nil && !errors.Is(err, os.ErrProcessDone) && debug {
log.Printf("error killing the Go command: %v", err)
}
@ -422,15 +422,17 @@ func runCmdContext(ctx context.Context, cmd *exec.Cmd) (err error) {
return <-resChan
}
func HandleHangingGoCommand(start time.Time, cmd *exec.Cmd) {
// handleHangingGoCommand outputs debugging information to help diagnose the
// cause of a hanging Go command, and then exits with log.Fatalf.
func handleHangingGoCommand(start time.Time, cmd *exec.Cmd, resChan chan error) {
switch runtime.GOOS {
case "linux", "darwin", "freebsd", "netbsd":
case "linux", "darwin", "freebsd", "netbsd", "openbsd":
fmt.Fprintln(os.Stderr, `DETECTED A HANGING GO COMMAND
The gopls test runner has detected a hanging go command. In order to debug
this, the output of ps and lsof/fstat is printed below.
The gopls test runner has detected a hanging go command. In order to debug
this, the output of ps and lsof/fstat is printed below.
See golang/go#54461 for more details.`)
See golang/go#54461 for more details.`)
fmt.Fprintln(os.Stderr, "\nps axo ppid,pid,command:")
fmt.Fprintln(os.Stderr, "-------------------------")
@ -438,7 +440,7 @@ See golang/go#54461 for more details.`)
psCmd.Stdout = os.Stderr
psCmd.Stderr = os.Stderr
if err := psCmd.Run(); err != nil {
panic(fmt.Sprintf("running ps: %v", err))
log.Printf("Handling hanging Go command: running ps: %v", err)
}
listFiles := "lsof"
@ -452,10 +454,24 @@ See golang/go#54461 for more details.`)
listFilesCmd.Stdout = os.Stderr
listFilesCmd.Stderr = os.Stderr
if err := listFilesCmd.Run(); err != nil {
panic(fmt.Sprintf("running %s: %v", listFiles, err))
log.Printf("Handling hanging Go command: running %s: %v", listFiles, err)
}
// Try to extract information about the slow go process by issuing a SIGQUIT.
if err := cmd.Process.Signal(sigStuckProcess); err == nil {
select {
case err := <-resChan:
stderr := "not a bytes.Buffer"
if buf, _ := cmd.Stderr.(*bytes.Buffer); buf != nil {
stderr = buf.String()
}
log.Printf("Quit hanging go command:\n\terr:%v\n\tstderr:\n%v\n\n", err, stderr)
case <-time.After(5 * time.Second):
}
} else {
log.Printf("Sending signal %d to hanging go command: %v", sigStuckProcess, err)
}
}
panic(fmt.Sprintf("detected hanging go command (golang/go#54461); waited %s\n\tcommand:%s\n\tpid:%d", time.Since(start), cmd, cmd.Process.Pid))
log.Fatalf("detected hanging go command (golang/go#54461); waited %s\n\tcommand:%s\n\tpid:%d", time.Since(start), cmd, cmd.Process.Pid)
}
func cmdDebugStr(cmd *exec.Cmd) string {

View file

@ -0,0 +1,13 @@
// Copyright 2025 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build !unix
package gocommand
import "os"
// sigStuckProcess is the signal to send to kill a hanging subprocess.
// On Unix we send SIGQUIT, but on non-Unix we only have os.Kill.
var sigStuckProcess = os.Kill

View file

@ -0,0 +1,13 @@
// Copyright 2025 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build unix
package gocommand
import "syscall"
// sigStuckProcess is the signal to send to kill a hanging subprocess.
// Send SIGQUIT to get a stack trace.
var sigStuckProcess = syscall.SIGQUIT

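For context, the reason SIGQUIT is preferred on Unix: the Go runtime responds to it by dumping all goroutine stacks to stderr before exiting, which is exactly the information wanted from a hung go command. A hedged, Unix-only sketch of the technique (the command here is a stand-in; a real reproduction needs a genuinely stuck process):

package main

import (
	"bytes"
	"os"
	"os/exec"
	"syscall"
	"time"
)

func main() {
	cmd := exec.Command("go", "env", "GOROOT") // stand-in for a hanging go command
	var stderr bytes.Buffer
	cmd.Stderr = &stderr
	if err := cmd.Start(); err != nil {
		return
	}
	time.Sleep(100 * time.Millisecond)
	// For a Go process, SIGQUIT triggers a full goroutine stack dump on
	// stderr; the error is ignored because the process may have exited.
	_ = cmd.Process.Signal(syscall.SIGQUIT)
	_ = cmd.Wait()
	os.Stderr.Write(stderr.Bytes())
}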
View file

@ -27,7 +27,6 @@ import (
"unicode"
"unicode/utf8"
"golang.org/x/sync/errgroup"
"golang.org/x/tools/go/ast/astutil"
"golang.org/x/tools/internal/event"
"golang.org/x/tools/internal/gocommand"
@ -91,18 +90,6 @@ type ImportFix struct {
Relevance float64 // see pkg
}
// An ImportInfo represents a single import statement.
type ImportInfo struct {
ImportPath string // import path, e.g. "crypto/rand".
Name string // import name, e.g. "crand", or "" if none.
}
// A packageInfo represents what's known about a package.
type packageInfo struct {
name string // real package name, if known.
exports map[string]bool // known exports.
}
// parseOtherFiles parses all the Go files in srcDir except filename, including
// test files if filename looks like a test.
//
@ -162,8 +149,8 @@ func addGlobals(f *ast.File, globals map[string]bool) {
// collectReferences builds a map of selector expressions, from
// left hand side (X) to a set of right hand sides (Sel).
func collectReferences(f *ast.File) references {
refs := references{}
func collectReferences(f *ast.File) References {
refs := References{}
var visitor visitFn
visitor = func(node ast.Node) ast.Visitor {
@ -233,7 +220,7 @@ func (p *pass) findMissingImport(pkg string, syms map[string]bool) *ImportInfo {
allFound := true
for right := range syms {
if !pkgInfo.exports[right] {
if !pkgInfo.Exports[right] {
allFound = false
break
}
@ -246,11 +233,6 @@ func (p *pass) findMissingImport(pkg string, syms map[string]bool) *ImportInfo {
return nil
}
// references is set of references found in a Go file. The first map key is the
// left hand side of a selector expression, the second key is the right hand
// side, and the value should always be true.
type references map[string]map[string]bool
// A pass contains all the inputs and state necessary to fix a file's imports.
// It can be modified in some ways during use; see comments below.
type pass struct {
@ -258,27 +240,29 @@ type pass struct {
fset *token.FileSet // fset used to parse f and its siblings.
f *ast.File // the file being fixed.
srcDir string // the directory containing f.
env *ProcessEnv // the environment to use for go commands, etc.
loadRealPackageNames bool // if true, load package names from disk rather than guessing them.
otherFiles []*ast.File // sibling files.
logf func(string, ...any)
source Source // the environment to use for go commands, etc.
loadRealPackageNames bool // if true, load package names from disk rather than guessing them.
otherFiles []*ast.File // sibling files.
goroot string
// Intermediate state, generated by load.
existingImports map[string][]*ImportInfo
allRefs references
missingRefs references
allRefs References
missingRefs References
// Inputs to fix. These can be augmented between successive fix calls.
lastTry bool // indicates that this is the last call and fix should clean up as best it can.
candidates []*ImportInfo // candidate imports in priority order.
knownPackages map[string]*packageInfo // information about all known packages.
knownPackages map[string]*PackageInfo // information about all known packages.
}
// loadPackageNames saves the package names for everything referenced by imports.
func (p *pass) loadPackageNames(imports []*ImportInfo) error {
if p.env.Logf != nil {
p.env.Logf("loading package names for %v packages", len(imports))
func (p *pass) loadPackageNames(ctx context.Context, imports []*ImportInfo) error {
if p.logf != nil {
p.logf("loading package names for %v packages", len(imports))
defer func() {
p.env.Logf("done loading package names for %v packages", len(imports))
p.logf("done loading package names for %v packages", len(imports))
}()
}
var unknown []string
@ -289,20 +273,17 @@ func (p *pass) loadPackageNames(imports []*ImportInfo) error {
unknown = append(unknown, imp.ImportPath)
}
resolver, err := p.env.GetResolver()
if err != nil {
return err
}
names, err := resolver.loadPackageNames(unknown, p.srcDir)
names, err := p.source.LoadPackageNames(ctx, p.srcDir, unknown)
if err != nil {
return err
}
// TODO(rfindley): revisit this. Why do we need to store known packages with
// no exports? The inconsistent data is confusing.
for path, name := range names {
p.knownPackages[path] = &packageInfo{
name: name,
exports: map[string]bool{},
p.knownPackages[path] = &PackageInfo{
Name: name,
Exports: map[string]bool{},
}
}
return nil
@ -330,8 +311,8 @@ func (p *pass) importIdentifier(imp *ImportInfo) string {
return imp.Name
}
known := p.knownPackages[imp.ImportPath]
if known != nil && known.name != "" {
return withoutVersion(known.name)
if known != nil && known.Name != "" {
return withoutVersion(known.Name)
}
return ImportPathToAssumedName(imp.ImportPath)
}
@ -339,9 +320,9 @@ func (p *pass) importIdentifier(imp *ImportInfo) string {
// load reads in everything necessary to run a pass, and reports whether the
// file already has all the imports it needs. It fills in p.missingRefs with the
// file's missing symbols, if any, or removes unused imports if not.
func (p *pass) load() ([]*ImportFix, bool) {
p.knownPackages = map[string]*packageInfo{}
p.missingRefs = references{}
func (p *pass) load(ctx context.Context) ([]*ImportFix, bool) {
p.knownPackages = map[string]*PackageInfo{}
p.missingRefs = References{}
p.existingImports = map[string][]*ImportInfo{}
// Load basic information about the file in question.
@ -364,9 +345,11 @@ func (p *pass) load() ([]*ImportFix, bool) {
// f's imports by the identifier they introduce.
imports := collectImports(p.f)
if p.loadRealPackageNames {
err := p.loadPackageNames(append(imports, p.candidates...))
err := p.loadPackageNames(ctx, append(imports, p.candidates...))
if err != nil {
p.env.logf("loading package names: %v", err)
if p.logf != nil {
p.logf("loading package names: %v", err)
}
return nil, false
}
}
@ -535,9 +518,10 @@ func (p *pass) assumeSiblingImportsValid() {
// We have the stdlib in memory; no need to guess.
rights = symbolNameSet(m)
}
p.addCandidate(imp, &packageInfo{
// TODO(rfindley): we should set package name here, for consistency.
p.addCandidate(imp, &PackageInfo{
// no name; we already know it.
exports: rights,
Exports: rights,
})
}
}
@ -546,14 +530,14 @@ func (p *pass) assumeSiblingImportsValid() {
// addCandidate adds a candidate import to p, and merges in the information
// in pkg.
func (p *pass) addCandidate(imp *ImportInfo, pkg *packageInfo) {
func (p *pass) addCandidate(imp *ImportInfo, pkg *PackageInfo) {
p.candidates = append(p.candidates, imp)
if existing, ok := p.knownPackages[imp.ImportPath]; ok {
if existing.name == "" {
existing.name = pkg.name
if existing.Name == "" {
existing.Name = pkg.Name
}
for export := range pkg.exports {
existing.exports[export] = true
for export := range pkg.Exports {
existing.Exports[export] = true
}
} else {
p.knownPackages[imp.ImportPath] = pkg
@ -581,19 +565,42 @@ func fixImportsDefault(fset *token.FileSet, f *ast.File, filename string, env *P
// getFixes gets the import fixes that need to be made to f in order to fix the imports.
// It does not modify the ast.
func getFixes(ctx context.Context, fset *token.FileSet, f *ast.File, filename string, env *ProcessEnv) ([]*ImportFix, error) {
source, err := NewProcessEnvSource(env, filename, f.Name.Name)
if err != nil {
return nil, err
}
goEnv, err := env.goEnv()
if err != nil {
return nil, err
}
return getFixesWithSource(ctx, fset, f, filename, goEnv["GOROOT"], env.logf, source)
}
func getFixesWithSource(ctx context.Context, fset *token.FileSet, f *ast.File, filename string, goroot string, logf func(string, ...any), source Source) ([]*ImportFix, error) {
// This logic is defensively duplicated from getFixes.
abs, err := filepath.Abs(filename)
if err != nil {
return nil, err
}
srcDir := filepath.Dir(abs)
env.logf("fixImports(filename=%q), abs=%q, srcDir=%q ...", filename, abs, srcDir)
if logf != nil {
logf("fixImports(filename=%q), srcDir=%q ...", filename, abs, srcDir)
}
// First pass: looking only at f, and using the naive algorithm to
// derive package names from import paths, see if the file is already
// complete. We can't add any imports yet, because we don't know
// if missing references are actually package vars.
p := &pass{fset: fset, f: f, srcDir: srcDir, env: env}
if fixes, done := p.load(); done {
p := &pass{
fset: fset,
f: f,
srcDir: srcDir,
logf: logf,
goroot: goroot,
source: source,
}
if fixes, done := p.load(ctx); done {
return fixes, nil
}
@ -605,7 +612,7 @@ func getFixes(ctx context.Context, fset *token.FileSet, f *ast.File, filename st
// Second pass: add information from other files in the same package,
// like their package vars and imports.
p.otherFiles = otherFiles
if fixes, done := p.load(); done {
if fixes, done := p.load(ctx); done {
return fixes, nil
}
@ -618,10 +625,17 @@ func getFixes(ctx context.Context, fset *token.FileSet, f *ast.File, filename st
// Third pass: get real package names where we had previously used
// the naive algorithm.
p = &pass{fset: fset, f: f, srcDir: srcDir, env: env}
p = &pass{
fset: fset,
f: f,
srcDir: srcDir,
logf: logf,
goroot: goroot,
source: p.source, // safe to reuse, as it's just a wrapper around env
}
p.loadRealPackageNames = true
p.otherFiles = otherFiles
if fixes, done := p.load(); done {
if fixes, done := p.load(ctx); done {
return fixes, nil
}
@ -766,7 +780,7 @@ func GetAllCandidates(ctx context.Context, wrapped func(ImportFix), searchPrefix
return true
},
dirFound: func(pkg *pkg) bool {
if !canUse(filename, pkg.dir) {
if !CanUse(filename, pkg.dir) {
return false
}
// Try the assumed package name first, then a simpler path match
@ -801,7 +815,7 @@ func GetImportPaths(ctx context.Context, wrapped func(ImportFix), searchPrefix,
return true
},
dirFound: func(pkg *pkg) bool {
if !canUse(filename, pkg.dir) {
if !CanUse(filename, pkg.dir) {
return false
}
return strings.HasPrefix(pkg.importPathShort, searchPrefix)
@ -835,7 +849,7 @@ func GetPackageExports(ctx context.Context, wrapped func(PackageExport), searchP
return true
},
dirFound: func(pkg *pkg) bool {
return pkgIsCandidate(filename, references{searchPkg: nil}, pkg)
return pkgIsCandidate(filename, References{searchPkg: nil}, pkg)
},
packageNameLoaded: func(pkg *pkg) bool {
return pkg.packageName == searchPkg
@ -913,7 +927,7 @@ type ProcessEnv struct {
WorkingDir string
// If Logf is non-nil, debug logging is enabled through this function.
Logf func(format string, args ...interface{})
Logf func(format string, args ...any)
// If set, ModCache holds a shared cache of directory info to use across
// multiple ProcessEnvs.
@ -1086,11 +1100,7 @@ func (e *ProcessEnv) invokeGo(ctx context.Context, verb string, args ...string)
return e.GocmdRunner.Run(ctx, inv)
}
func addStdlibCandidates(pass *pass, refs references) error {
goenv, err := pass.env.goEnv()
if err != nil {
return err
}
func addStdlibCandidates(pass *pass, refs References) error {
localbase := func(nm string) string {
ans := path.Base(nm)
if ans[0] == 'v' {
@ -1105,13 +1115,13 @@ func addStdlibCandidates(pass *pass, refs references) error {
}
add := func(pkg string) {
// Prevent self-imports.
if path.Base(pkg) == pass.f.Name.Name && filepath.Join(goenv["GOROOT"], "src", pkg) == pass.srcDir {
if path.Base(pkg) == pass.f.Name.Name && filepath.Join(pass.goroot, "src", pkg) == pass.srcDir {
return
}
exports := symbolNameSet(stdlib.PackageSymbols[pkg])
pass.addCandidate(
&ImportInfo{ImportPath: pkg},
&packageInfo{name: localbase(pkg), exports: exports})
&PackageInfo{Name: localbase(pkg), Exports: exports})
}
for left := range refs {
if left == "rand" {
@ -1122,6 +1132,9 @@ func addStdlibCandidates(pass *pass, refs references) error {
// but we have no way of figuring out what the user is using
// TODO: investigate using the toolchain version to disambiguate in the stdlib
add("math/rand/v2")
// math/rand has an overlapping API
// TestIssue66407 fails without this
add("math/rand")
continue
}
for importPath := range stdlib.PackageSymbols {
@ -1175,91 +1188,14 @@ type scanCallback struct {
exportsLoaded func(pkg *pkg, exports []stdlib.Symbol)
}
func addExternalCandidates(ctx context.Context, pass *pass, refs references, filename string) error {
func addExternalCandidates(ctx context.Context, pass *pass, refs References, filename string) error {
ctx, done := event.Start(ctx, "imports.addExternalCandidates")
defer done()
var mu sync.Mutex
found := make(map[string][]pkgDistance)
callback := &scanCallback{
rootFound: func(gopathwalk.Root) bool {
return true // We want everything.
},
dirFound: func(pkg *pkg) bool {
return pkgIsCandidate(filename, refs, pkg)
},
packageNameLoaded: func(pkg *pkg) bool {
if _, want := refs[pkg.packageName]; !want {
return false
}
if pkg.dir == pass.srcDir && pass.f.Name.Name == pkg.packageName {
// The candidate is in the same directory and has the
// same package name. Don't try to import ourselves.
return false
}
if !canUse(filename, pkg.dir) {
return false
}
mu.Lock()
defer mu.Unlock()
found[pkg.packageName] = append(found[pkg.packageName], pkgDistance{pkg, distance(pass.srcDir, pkg.dir)})
return false // We'll do our own loading after we sort.
},
}
resolver, err := pass.env.GetResolver()
results, err := pass.source.ResolveReferences(ctx, filename, refs)
if err != nil {
return err
}
if err = resolver.scan(ctx, callback); err != nil {
return err
}
// Search for imports matching potential package references.
type result struct {
imp *ImportInfo
pkg *packageInfo
}
results := make([]*result, len(refs))
g, ctx := errgroup.WithContext(ctx)
searcher := symbolSearcher{
logf: pass.env.logf,
srcDir: pass.srcDir,
xtest: strings.HasSuffix(pass.f.Name.Name, "_test"),
loadExports: resolver.loadExports,
}
i := 0
for pkgName, symbols := range refs {
index := i // claim an index in results
i++
pkgName := pkgName
symbols := symbols
g.Go(func() error {
found, err := searcher.search(ctx, found[pkgName], pkgName, symbols)
if err != nil {
return err
}
if found == nil {
return nil // No matching package.
}
imp := &ImportInfo{
ImportPath: found.importPathShort,
}
pkg := &packageInfo{
name: pkgName,
exports: symbols,
}
results[index] = &result{imp, pkg}
return nil
})
}
if err := g.Wait(); err != nil {
return err
}
for _, result := range results {
if result == nil {
@ -1267,7 +1203,7 @@ func addExternalCandidates(ctx context.Context, pass *pass, refs references, fil
}
// Don't offer completions that would shadow predeclared
// names, such as github.com/coreos/etcd/error.
if types.Universe.Lookup(result.pkg.name) != nil { // predeclared
if types.Universe.Lookup(result.Package.Name) != nil { // predeclared
// Ideally we would skip this candidate only
// if the predeclared name is actually
// referenced by the file, but that's a lot
@ -1276,7 +1212,7 @@ func addExternalCandidates(ctx context.Context, pass *pass, refs references, fil
// user before long.
continue
}
pass.addCandidate(result.imp, result.pkg)
pass.addCandidate(result.Import, result.Package)
}
return nil
}
@ -1801,9 +1737,9 @@ func (s *symbolSearcher) searchOne(ctx context.Context, c pkgDistance, symbols m
// filename is the file being formatted.
// pkgIdent is the package being searched for, like "client" (if
// searching for "client.New")
func pkgIsCandidate(filename string, refs references, pkg *pkg) bool {
func pkgIsCandidate(filename string, refs References, pkg *pkg) bool {
// Check "internal" and "vendor" visibility:
if !canUse(filename, pkg.dir) {
if !CanUse(filename, pkg.dir) {
return false
}
@ -1826,9 +1762,9 @@ func pkgIsCandidate(filename string, refs references, pkg *pkg) bool {
return false
}
// canUse reports whether the package in dir is usable from filename,
// CanUse reports whether the package in dir is usable from filename,
// respecting the Go "internal" and "vendor" visibility rules.
func canUse(filename, dir string) bool {
func CanUse(filename, dir string) bool {
// Fast path check, before any allocations. If it doesn't contain vendor
// or internal, it's not tricky:
// Note that this can false-negative on directories like "notinternal",

View file

@ -47,7 +47,14 @@ type Options struct {
// Process implements golang.org/x/tools/imports.Process with explicit context in opt.Env.
func Process(filename string, src []byte, opt *Options) (formatted []byte, err error) {
fileSet := token.NewFileSet()
file, adjust, err := parse(fileSet, filename, src, opt)
var parserMode parser.Mode
if opt.Comments {
parserMode |= parser.ParseComments
}
if opt.AllErrors {
parserMode |= parser.AllErrors
}
file, adjust, err := parse(fileSet, filename, src, parserMode, opt.Fragment)
if err != nil {
return nil, err
}
@ -66,17 +73,19 @@ func Process(filename string, src []byte, opt *Options) (formatted []byte, err e
//
// Note that filename's directory influences which imports can be chosen,
// so it is important that filename be accurate.
func FixImports(ctx context.Context, filename string, src []byte, opt *Options) (fixes []*ImportFix, err error) {
func FixImports(ctx context.Context, filename string, src []byte, goroot string, logf func(string, ...any), source Source) (fixes []*ImportFix, err error) {
ctx, done := event.Start(ctx, "imports.FixImports")
defer done()
fileSet := token.NewFileSet()
file, _, err := parse(fileSet, filename, src, opt)
// TODO(rfindley): these default values for ParseComments and AllErrors were
// extracted from gopls, but are they even needed?
file, _, err := parse(fileSet, filename, src, parser.ParseComments|parser.AllErrors, true)
if err != nil {
return nil, err
}
return getFixes(ctx, fileSet, file, filename, opt.Env)
return getFixesWithSource(ctx, fileSet, file, filename, goroot, logf, source)
}
// ApplyFixes applies all of the fixes to the file and formats it. extraMode
@ -114,7 +123,7 @@ func ApplyFixes(fixes []*ImportFix, filename string, src []byte, opt *Options, e
// formatted file, and returns the postprocessed result.
func formatFile(fset *token.FileSet, file *ast.File, src []byte, adjust func(orig []byte, src []byte) []byte, opt *Options) ([]byte, error) {
mergeImports(file)
sortImports(opt.LocalPrefix, fset.File(file.Pos()), file)
sortImports(opt.LocalPrefix, fset.File(file.FileStart), file)
var spacesBefore []string // import paths we need spaces before
for _, impSection := range astutil.Imports(fset, file) {
// Within each block of contiguous imports, see if any
@ -164,13 +173,9 @@ func formatFile(fset *token.FileSet, file *ast.File, src []byte, adjust func(ori
// parse parses src, which was read from filename,
// as a Go source file or statement list.
func parse(fset *token.FileSet, filename string, src []byte, opt *Options) (*ast.File, func(orig, src []byte) []byte, error) {
var parserMode parser.Mode // legacy ast.Object resolution is required here
if opt.Comments {
parserMode |= parser.ParseComments
}
if opt.AllErrors {
parserMode |= parser.AllErrors
func parse(fset *token.FileSet, filename string, src []byte, parserMode parser.Mode, fragment bool) (*ast.File, func(orig, src []byte) []byte, error) {
if parserMode&parser.SkipObjectResolution != 0 {
panic("legacy ast.Object resolution is required")
}
// Try as whole source file.
@ -181,7 +186,7 @@ func parse(fset *token.FileSet, filename string, src []byte, opt *Options) (*ast
// If the error is that the source file didn't begin with a
// package line and we accept fragmented input, fall through to
// try as a source fragment. Stop and return on any other error.
if !opt.Fragment || !strings.Contains(err.Error(), "expected 'package'") {
if !fragment || !strings.Contains(err.Error(), "expected 'package'") {
return nil, nil, err
}

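For context on the parser-mode plumbing above, a hedged sketch via the public wrapper golang.org/x/tools/imports, whose Process forwards Comments and AllErrors down to this internal package:

package main

import (
	"fmt"

	"golang.org/x/tools/imports"
)

func main() {
	src := []byte("package p\n\nvar _ = fmt.Sprintln\n")
	out, err := imports.Process("p.go", src, &imports.Options{
		Comments:  true, // maps to parser.ParseComments
		AllErrors: true, // maps to parser.AllErrors
	})
	if err != nil {
		panic(err)
	}
	fmt.Printf("%s", out) // the missing "fmt" import has been added
}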
63
vendor/golang.org/x/tools/internal/imports/source.go generated vendored Normal file
View file

@ -0,0 +1,63 @@
// Copyright 2024 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package imports
import "context"
// These types document the APIs below.
//
// TODO(rfindley): consider making these defined types rather than aliases.
type (
ImportPath = string
PackageName = string
Symbol = string
// References is a set of references found in a Go file. The first map key is the
// left hand side of a selector expression, the second key is the right hand
// side, and the value should always be true.
References = map[PackageName]map[Symbol]bool
)
// A Result satisfies a missing import.
//
// The Import field describes the missing import spec, and the Package field
// summarizes the package exports.
type Result struct {
Import *ImportInfo
Package *PackageInfo
}
// An ImportInfo represents a single import statement.
type ImportInfo struct {
ImportPath string // import path, e.g. "crypto/rand".
Name string // import name, e.g. "crand", or "" if none.
}
// A PackageInfo represents what's known about a package.
type PackageInfo struct {
Name string // package name in the package declaration, if known
Exports map[string]bool // set of names of known package-level symbols
}
// A Source provides imports to satisfy unresolved references in the file being
// fixed.
type Source interface {
// LoadPackageNames queries PackageName information for the requested import
// paths, when operating from the provided srcDir.
//
// TODO(rfindley): try to refactor to remove this operation.
LoadPackageNames(ctx context.Context, srcDir string, paths []ImportPath) (map[ImportPath]PackageName, error)
// ResolveReferences asks the Source for the best package name to satisfy
// each of the missing references, in the context of fixing the given
// filename.
//
// Returns a slice containing at most one [Result] per package name, each
// providing the required symbols. A package name may be omitted from the
// results if no candidates satisfy all of its missing references. It is up
// to each data source to select the best result for each entry in the
// missing map.
ResolveReferences(ctx context.Context, filename string, missing References) ([]*Result, error)
}

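To make the new Source seam concrete, here is a minimal in-memory implementation. This is a hypothetical sketch, assuming it sits inside this package (so Result, ImportInfo, PackageInfo, References, and the context import are in scope), of the kind of stub a test could pass to getFixesWithSource:

// fixedSource is a hypothetical in-memory Source for tests.
type fixedSource struct {
	names   map[ImportPath]PackageName // for LoadPackageNames
	results map[PackageName]*Result    // for ResolveReferences
}

func (s fixedSource) LoadPackageNames(ctx context.Context, srcDir string, paths []ImportPath) (map[ImportPath]PackageName, error) {
	out := make(map[ImportPath]PackageName, len(paths))
	for _, p := range paths {
		if n, ok := s.names[p]; ok {
			out[p] = n
		}
	}
	return out, nil
}

func (s fixedSource) ResolveReferences(ctx context.Context, filename string, missing References) ([]*Result, error) {
	var ans []*Result
	for pkgName, syms := range missing {
		r, ok := s.results[pkgName]
		if !ok {
			continue // no candidate for this package name
		}
		satisfiesAll := true
		for sym := range syms {
			if !r.Package.Exports[sym] {
				satisfiesAll = false // omit: must provide every missing symbol
				break
			}
		}
		if satisfiesAll {
			ans = append(ans, r)
		}
	}
	return ans, nil
}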
View file

@ -0,0 +1,129 @@
// Copyright 2024 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package imports
import (
"context"
"path/filepath"
"strings"
"sync"
"golang.org/x/sync/errgroup"
"golang.org/x/tools/internal/gopathwalk"
)
// ProcessEnvSource implements the [Source] interface using the legacy
// [ProcessEnv] abstraction.
type ProcessEnvSource struct {
env *ProcessEnv
srcDir string
filename string
pkgName string
}
// NewProcessEnvSource returns a [ProcessEnvSource] wrapping the given
// env, to be used for fixing imports in the file with name filename in package
// named pkgName.
func NewProcessEnvSource(env *ProcessEnv, filename, pkgName string) (*ProcessEnvSource, error) {
abs, err := filepath.Abs(filename)
if err != nil {
return nil, err
}
srcDir := filepath.Dir(abs)
return &ProcessEnvSource{
env: env,
srcDir: srcDir,
filename: filename,
pkgName: pkgName,
}, nil
}
func (s *ProcessEnvSource) LoadPackageNames(ctx context.Context, srcDir string, unknown []string) (map[string]string, error) {
r, err := s.env.GetResolver()
if err != nil {
return nil, err
}
return r.loadPackageNames(unknown, srcDir)
}
func (s *ProcessEnvSource) ResolveReferences(ctx context.Context, filename string, refs map[string]map[string]bool) ([]*Result, error) {
var mu sync.Mutex
found := make(map[string][]pkgDistance)
callback := &scanCallback{
rootFound: func(gopathwalk.Root) bool {
return true // We want everything.
},
dirFound: func(pkg *pkg) bool {
return pkgIsCandidate(filename, refs, pkg)
},
packageNameLoaded: func(pkg *pkg) bool {
if _, want := refs[pkg.packageName]; !want {
return false
}
if pkg.dir == s.srcDir && s.pkgName == pkg.packageName {
// The candidate is in the same directory and has the
// same package name. Don't try to import ourselves.
return false
}
if !CanUse(filename, pkg.dir) {
return false
}
mu.Lock()
defer mu.Unlock()
found[pkg.packageName] = append(found[pkg.packageName], pkgDistance{pkg, distance(s.srcDir, pkg.dir)})
return false // We'll do our own loading after we sort.
},
}
resolver, err := s.env.GetResolver()
if err != nil {
return nil, err
}
if err := resolver.scan(ctx, callback); err != nil {
return nil, err
}
g, ctx := errgroup.WithContext(ctx)
searcher := symbolSearcher{
logf: s.env.logf,
srcDir: s.srcDir,
xtest: strings.HasSuffix(s.pkgName, "_test"),
loadExports: resolver.loadExports,
}
var resultMu sync.Mutex
results := make(map[string]*Result, len(refs))
for pkgName, symbols := range refs {
g.Go(func() error {
found, err := searcher.search(ctx, found[pkgName], pkgName, symbols)
if err != nil {
return err
}
if found == nil {
return nil // No matching package.
}
imp := &ImportInfo{
ImportPath: found.importPathShort,
}
pkg := &PackageInfo{
Name: pkgName,
Exports: symbols,
}
resultMu.Lock()
results[pkgName] = &Result{Import: imp, Package: pkg}
resultMu.Unlock()
return nil
})
}
if err := g.Wait(); err != nil {
return nil, err
}
var ans []*Result
for _, x := range results {
ans = append(ans, x)
}
return ans, nil
}

View file

@ -0,0 +1,103 @@
// Copyright 2024 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package imports
import (
"context"
"sync"
"time"
"golang.org/x/tools/internal/modindex"
)
// This code lives here rather than in the modindex package to avoid
// import loops.
//
// IndexSource implements Source using modindex, so it covers only the
// module cache.
//
// This is perhaps over-engineered: a new Index is read at first use,
// Update is called every 15 minutes, and a new Index is read if the
// index changed. It is not clear the Mutex is needed.
type IndexSource struct {
modcachedir string
mutex sync.Mutex
ix *modindex.Index
expires time.Time
}
// NewIndexSource creates a new Source. It is called from NewView in cache/session.go.
func NewIndexSource(cachedir string) *IndexSource {
return &IndexSource{modcachedir: cachedir}
}
func (s *IndexSource) LoadPackageNames(ctx context.Context, srcDir string, paths []ImportPath) (map[ImportPath]PackageName, error) {
// This is used by goimports to resolve the package names of imports of the
// current package, which is irrelevant for the module cache.
return nil, nil
}
func (s *IndexSource) ResolveReferences(ctx context.Context, filename string, missing References) ([]*Result, error) {
if err := s.maybeReadIndex(); err != nil {
return nil, err
}
var cs []modindex.Candidate
for pkg, nms := range missing {
for nm := range nms {
x := s.ix.Lookup(pkg, nm, false)
cs = append(cs, x...)
}
}
found := make(map[string]*Result)
for _, c := range cs {
var x *Result
if x = found[c.ImportPath]; x == nil {
x = &Result{
Import: &ImportInfo{
ImportPath: c.ImportPath,
Name: "",
},
Package: &PackageInfo{
Name: c.PkgName,
Exports: make(map[string]bool),
},
}
found[c.ImportPath] = x
}
x.Package.Exports[c.Name] = true
}
var ans []*Result
for _, x := range found {
ans = append(ans, x)
}
return ans, nil
}
func (s *IndexSource) maybeReadIndex() error {
s.mutex.Lock()
defer s.mutex.Unlock()
var readIndex bool
if time.Now().After(s.expires) {
ok, err := modindex.Update(s.modcachedir)
if err != nil {
return err
}
if ok {
readIndex = true
}
}
if readIndex || s.ix == nil {
ix, err := modindex.ReadIndex(s.modcachedir)
if err != nil {
return err
}
s.ix = ix
// for now refresh every 15 minutes
s.expires = time.Now().Add(time.Minute * 15)
}
return nil
}

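For context, wiring the index in as a Source is short. A hedged sketch, where ctx is a context.Context and gomodcache is the module cache path (both hypothetical names):

src := NewIndexSource(gomodcache)
results, err := src.ResolveReferences(ctx, "main.go", References{
	"yaml": {"Marshal": true, "Unmarshal": true},
})
// results holds at most one *Result per import path; err surfaces index
// read/update failures from maybeReadIndex.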
View file

@ -0,0 +1,135 @@
// Copyright 2024 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package modindex
import (
"fmt"
"log"
"os"
"path/filepath"
"regexp"
"slices"
"strings"
"sync"
"time"
"golang.org/x/mod/semver"
"golang.org/x/tools/internal/gopathwalk"
)
type directory struct {
path Relpath
importPath string
version string // semantic version
syms []symbol
}
// byImportPath groups the directories by import path,
// sorting the ones with the same import path by semantic version,
// most recent first.
func byImportPath(dirs []Relpath) (map[string][]*directory, error) {
ans := make(map[string][]*directory) // key is import path
for _, d := range dirs {
ip, sv, err := DirToImportPathVersion(d)
if err != nil {
return nil, err
}
ans[ip] = append(ans[ip], &directory{
path: d,
importPath: ip,
version: sv,
})
}
for k, v := range ans {
semanticSort(v)
ans[k] = v
}
return ans, nil
}
// sort the directories by semantic version, latest first
func semanticSort(v []*directory) {
slices.SortFunc(v, func(l, r *directory) int {
if n := semver.Compare(l.version, r.version); n != 0 {
return -n // latest first
}
return strings.Compare(string(l.path), string(r.path))
})
}
// modCacheRegexp splits a relative path into module, module version, and package.
var modCacheRegexp = regexp.MustCompile(`(.*)@([^/\\]*)(.*)`)
// DirToImportPathVersion computes import path and semantic version
func DirToImportPathVersion(dir Relpath) (string, string, error) {
m := modCacheRegexp.FindStringSubmatch(string(dir))
// m[1] is the module path
// m[2] is the version (major.minor.patch, possibly with a -<pre-release> suffix)
// m[3] is the rest of the package path
if len(m) != 4 {
return "", "", fmt.Errorf("bad dir %s", dir)
}
if !semver.IsValid(m[2]) {
return "", "", fmt.Errorf("bad semantic version %s", m[2])
}
// ToSlash is required for Windows.
return filepath.ToSlash(m[1] + m[3]), m[2], nil
}
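// Example (illustrative): for the module cache directory
//
//	github.com/stretchr/testify@v1.9.0/assert
//
// the regexp yields m[1]="github.com/stretchr/testify", m[2]="v1.9.0",
// m[3]="/assert", so DirToImportPathVersion returns
// ("github.com/stretchr/testify/assert", "v1.9.0", nil).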
// A region controls which directories to look at, both for updating the
// index incrementally and for testing that. (For testing, one builds an
// index as of time A, incrementally updates it to B, and compares the
// result to an index built as of B.)
type region struct {
onlyAfter, onlyBefore time.Time
sync.Mutex
ans []Relpath
}
func findDirs(root string, onlyAfter, onlyBefore time.Time) []Relpath {
roots := []gopathwalk.Root{{Path: root, Type: gopathwalk.RootModuleCache}}
// TODO(PJW): adjust concurrency
opts := gopathwalk.Options{ModulesEnabled: true, Concurrency: 1 /* ,Logf: log.Printf*/}
betw := &region{
onlyAfter: onlyAfter,
onlyBefore: onlyBefore,
}
gopathwalk.WalkSkip(roots, betw.addDir, betw.skipDir, opts)
return betw.ans
}
func (r *region) addDir(rt gopathwalk.Root, dir string) {
// do we need to check times?
r.Lock()
defer r.Unlock()
x := filepath.ToSlash(string(toRelpath(Abspath(rt.Path), dir)))
r.ans = append(r.ans, toRelpath(Abspath(rt.Path), x))
}
func (r *region) skipDir(_ gopathwalk.Root, dir string) bool {
// The cache directory is already ignored in gopathwalk.
if filepath.Base(dir) == "internal" {
return true
}
if strings.Contains(dir, "toolchain@") {
return true
}
// don't look inside @ directories that are too old
if strings.Contains(filepath.Base(dir), "@") {
st, err := os.Stat(dir)
if err != nil {
log.Printf("can't stat dir %s %v", dir, err)
return true
}
if st.ModTime().Before(r.onlyAfter) {
return true
}
if st.ModTime().After(r.onlyBefore) {
return true
}
}
return false
}

266
vendor/golang.org/x/tools/internal/modindex/index.go generated vendored Normal file
View file

@ -0,0 +1,266 @@
// Copyright 2024 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package modindex
import (
"bufio"
"encoding/csv"
"errors"
"fmt"
"hash/crc64"
"io"
"io/fs"
"log"
"os"
"path/filepath"
"strconv"
"strings"
"testing"
"time"
)
/*
The on-disk index is a text file.
The first 3 lines are header information containing CurrentVersion,
the value of GOMODCACHE, and the validity date of the index.
(This is when the code started building the index.)
Following the header are sections of lines, one section for each
import path. These sections are sorted by package name.
The first line of each section, marked by a leading :, contains
the package name, the import path, the name of the directory relative
to GOMODCACHE, and its semantic version.
The rest of each section consists of one line per exported symbol.
The lines are sorted by the symbol's name and contain the name,
an indication of its lexical type (C, T, V, F), and if it is the
name of a function, information about the signature.
The fields in the section header lines are separated by commas, and
in the unlikely event this would be confusing, the csv package is used
to write (and read) them.
In the lines containing exported names, C=const, V=var, T=type, F=func.
If it is a func, the next field is the number of returned values,
followed by pairs consisting of formal parameter names and types.
All these fields are separated by spaces. Any spaces in a type
(e.g., chan struct{}) are replaced by $s on the disk. The $s are
turned back into spaces when read.
Here is an index header (the comments are not part of the index):
0 // version (of the index format)
/usr/local/google/home/pjw/go/pkg/mod // GOMODCACHE
2024-09-11 18:55:09 // validity date of the index
Here is an index section:
:yaml,gopkg.in/yaml.v1,gopkg.in/yaml.v1@v1.0.0-20140924161607-9f9df34309c0,v1.0.0-20140924161607-9f9df34309c0
Getter T
Marshal F 2 in interface{}
Setter T
Unmarshal F 1 in []byte out interface{}
The package name is yaml, the import path is gopkg.in/yaml.v1.
Getter and Setter are types, and Marshal and Unmarshal are functions.
The latter returns one value and has two arguments, 'in' and 'out'
whose types are []byte and interface{}.
*/
// CurrentVersion tells readers about the format of the index.
const CurrentVersion int = 0
// Index is returned by ReadIndex().
type Index struct {
Version int
Cachedir Abspath // The directory containing the module cache
Changed time.Time // The index is up to date as of Changed
Entries []Entry
}
// An Entry contains information for an import path.
type Entry struct {
Dir Relpath // directory in modcache
ImportPath string
PkgName string
Version string
//ModTime STime // is this useful?
Names []string // exported names and information
}
// IndexDir is where the module index is stored.
var IndexDir string
// Set IndexDir
func init() {
var dir string
var err error
if testing.Testing() {
dir = os.TempDir()
} else {
dir, err = os.UserCacheDir()
// shouldn't happen, but TempDir is better than
// creating ./go/imports
if err != nil {
dir = os.TempDir()
}
}
dir = filepath.Join(dir, "go", "imports")
os.MkdirAll(dir, 0777)
IndexDir = dir
}
// ReadIndex reads the latest version of the on-disk index
// for the cache directory cd.
// It returns (nil, nil) if there is no index, but returns
// a non-nil error if the index exists but could not be read.
func ReadIndex(cachedir string) (*Index, error) {
cachedir, err := filepath.Abs(cachedir)
if err != nil {
return nil, err
}
cd := Abspath(cachedir)
dir := IndexDir
base := indexNameBase(cd)
iname := filepath.Join(dir, base)
buf, err := os.ReadFile(iname)
if err != nil {
if errors.Is(err, fs.ErrNotExist) {
return nil, nil
}
return nil, fmt.Errorf("cannot read %s: %w", iname, err)
}
fname := filepath.Join(dir, string(buf))
fd, err := os.Open(fname)
if err != nil {
return nil, err
}
defer fd.Close()
r := bufio.NewReader(fd)
ix, err := readIndexFrom(cd, r)
if err != nil {
return nil, err
}
return ix, nil
}
func readIndexFrom(cd Abspath, bx io.Reader) (*Index, error) {
b := bufio.NewScanner(bx)
var ans Index
// header
ok := b.Scan()
if !ok {
return nil, fmt.Errorf("unexpected scan error")
}
l := b.Text()
var err error
ans.Version, err = strconv.Atoi(l)
if err != nil {
return nil, err
}
if ans.Version != CurrentVersion {
return nil, fmt.Errorf("got version %d, expected %d", ans.Version, CurrentVersion)
}
if ok := b.Scan(); !ok {
return nil, fmt.Errorf("scanner error reading cachedir")
}
ans.Cachedir = Abspath(b.Text())
if ok := b.Scan(); !ok {
return nil, fmt.Errorf("scanner error reading index creation time")
}
// TODO(pjw): need to check that this is the expected cachedir
// so the tag should be passed in to this function
ans.Changed, err = time.ParseInLocation(time.DateTime, b.Text(), time.Local)
if err != nil {
return nil, err
}
var curEntry *Entry
for b.Scan() {
v := b.Text()
if v[0] == ':' {
if curEntry != nil {
ans.Entries = append(ans.Entries, *curEntry)
}
// As directories may contain commas and quotes, they need to be read as CSV.
rdr := strings.NewReader(v[1:])
cs := csv.NewReader(rdr)
flds, err := cs.Read()
if err != nil {
return nil, err
}
if len(flds) != 4 {
return nil, fmt.Errorf("header contains %d fields, not 4: %q", len(v), v)
}
curEntry = &Entry{PkgName: flds[0], ImportPath: flds[1], Dir: toRelpath(cd, flds[2]), Version: flds[3]}
continue
}
curEntry.Names = append(curEntry.Names, v)
}
if curEntry != nil {
ans.Entries = append(ans.Entries, *curEntry)
}
if err := b.Err(); err != nil {
return nil, fmt.Errorf("scanner failed %v", err)
}
return &ans, nil
}
// write the index as a text file
func writeIndex(cachedir Abspath, ix *Index) error {
ipat := fmt.Sprintf("index-%d-*", CurrentVersion)
fd, err := os.CreateTemp(IndexDir, ipat)
if err != nil {
return err // can this happen?
}
defer fd.Close()
if err := writeIndexToFile(ix, fd); err != nil {
return err
}
content := fd.Name()
content = filepath.Base(content)
base := indexNameBase(cachedir)
nm := filepath.Join(IndexDir, base)
err = os.WriteFile(nm, []byte(content), 0666)
if err != nil {
return err
}
return nil
}
func writeIndexToFile(x *Index, fd *os.File) error {
cnt := 0
w := bufio.NewWriter(fd)
fmt.Fprintf(w, "%d\n", x.Version)
fmt.Fprintf(w, "%s\n", x.Cachedir)
// round the time down
tm := x.Changed.Add(-time.Second / 2)
fmt.Fprintf(w, "%s\n", tm.Format(time.DateTime))
for _, e := range x.Entries {
if e.ImportPath == "" {
continue // shouldn't happen
}
// PJW: maybe always write these headers as csv?
if strings.ContainsAny(string(e.Dir), ",\"") {
log.Printf("DIR: %s", e.Dir)
cw := csv.NewWriter(w)
cw.Write([]string{":" + e.PkgName, e.ImportPath, string(e.Dir), e.Version})
cw.Flush()
} else {
fmt.Fprintf(w, ":%s,%s,%s,%s\n", e.PkgName, e.ImportPath, e.Dir, e.Version)
}
for _, x := range e.Names {
fmt.Fprintf(w, "%s\n", x)
cnt++
}
}
if err := w.Flush(); err != nil {
return err
}
return nil
}
// return the base name of the file containing the name of the current index
func indexNameBase(cachedir Abspath) string {
// crc64 is a way to convert path names into 16 hex digits.
h := crc64.Checksum([]byte(cachedir), crc64.MakeTable(crc64.ECMA))
fname := fmt.Sprintf("index-name-%d-%016x", CurrentVersion, h)
return fname
}

178
vendor/golang.org/x/tools/internal/modindex/lookup.go generated vendored Normal file
View file

@ -0,0 +1,178 @@
// Copyright 2024 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package modindex
import (
"slices"
"strconv"
"strings"
)
type Candidate struct {
PkgName string
Name string
Dir string
ImportPath string
Type LexType
Deprecated bool
// information for Funcs
Results int16 // how many results
Sig []Field // arg names and types
}
type Field struct {
Arg, Type string
}
type LexType int8
const (
Const LexType = iota
Var
Type
Func
)
// LookupAll only returns those Candidates whose import path
// provides all the given names.
func (ix *Index) LookupAll(pkg string, names ...string) map[string][]Candidate {
// this can be made faster when benchmarks show that it needs to be
names = uniquify(names)
byImpPath := make(map[string][]Candidate)
for _, nm := range names {
cands := ix.Lookup(pkg, nm, false)
for _, c := range cands {
byImpPath[c.ImportPath] = append(byImpPath[c.ImportPath], c)
}
}
for k, v := range byImpPath {
if len(v) != len(names) {
delete(byImpPath, k)
}
}
return byImpPath
}
// remove duplicates
func uniquify(in []string) []string {
if len(in) == 0 {
return in
}
in = slices.Clone(in)
slices.Sort(in)
return slices.Compact(in)
}
// Lookup finds all the symbols in the index with the given PkgName and name.
// If prefix is true, it finds all of these with name as a prefix.
func (ix *Index) Lookup(pkg, name string, prefix bool) []Candidate {
loc, ok := slices.BinarySearchFunc(ix.Entries, pkg, func(e Entry, pkg string) int {
return strings.Compare(e.PkgName, pkg)
})
if !ok {
return nil // didn't find the package
}
var ans []Candidate
// loc is the first entry for this package name, but there may be several
for i := loc; i < len(ix.Entries); i++ {
e := ix.Entries[i]
if e.PkgName != pkg {
break // end of sorted package names
}
nloc, ok := slices.BinarySearchFunc(e.Names, name, func(s string, name string) int {
if strings.HasPrefix(s, name) {
return 0
}
if s < name {
return -1
}
return 1
})
if !ok {
continue // didn't find the name, nor any symbols with name as a prefix
}
for j := nloc; j < len(e.Names); j++ {
nstr := e.Names[j]
// benchmarks show this makes a difference when there are a lot of possibilities
flds := fastSplit(nstr)
if !(flds[0] == name || prefix && strings.HasPrefix(flds[0], name)) {
// past range of matching Names
break
}
if len(flds) < 2 {
continue // should never happen
}
px := Candidate{
PkgName: pkg,
Name: flds[0],
Dir: string(e.Dir),
ImportPath: e.ImportPath,
Type: asLexType(flds[1][0]),
Deprecated: len(flds[1]) > 1 && flds[1][1] == 'D',
}
if px.Type == Func {
n, err := strconv.Atoi(flds[2])
if err != nil {
continue // should never happen
}
px.Results = int16(n)
if len(flds) >= 4 {
sig := strings.Split(flds[3], " ")
for i := 0; i < len(sig); i++ {
// $ cannot otherwise occur. removing the spaces
// almost works, but for chan struct{}, e.g.
sig[i] = strings.Replace(sig[i], "$", " ", -1)
}
px.Sig = toFields(sig)
}
}
ans = append(ans, px)
}
}
return ans
}
func toFields(sig []string) []Field {
ans := make([]Field, len(sig)/2)
for i := 0; i < len(ans); i++ {
ans[i] = Field{Arg: sig[2*i], Type: sig[2*i+1]}
}
return ans
}
// fastSplit splits x into its first 4 fields separated by single spaces;
// benchmarks show this is measurably better than strings.Split.
func fastSplit(x string) []string {
ans := make([]string, 0, 4)
nxt := 0
start := 0
for i := 0; i < len(x); i++ {
if x[i] != ' ' {
continue
}
ans = append(ans, x[start:i])
nxt++
start = i + 1
if nxt >= 3 {
break
}
}
ans = append(ans, x[start:])
return ans
}
func asLexType(c byte) LexType {
switch c {
case 'C':
return Const
case 'V':
return Var
case 'T':
return Type
case 'F':
return Func
}
return -1
}

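For context, a hedged usage sketch of the lookup API above (modindex is internal to x/tools, so this compiles only within that module; the cache path is hypothetical):

package main

import (
	"fmt"

	"golang.org/x/tools/internal/modindex"
)

func main() {
	// ReadIndex returns (nil, nil) if no index has been written yet.
	ix, err := modindex.ReadIndex("/path/to/gomodcache")
	if err != nil || ix == nil {
		return
	}
	// All candidates in packages named "yaml" that export Marshal.
	for _, c := range ix.Lookup("yaml", "Marshal", false) {
		fmt.Println(c.ImportPath, c.Name, c.Type)
	}
	// Only import paths that provide both Marshal and Unmarshal.
	for importPath := range ix.LookupAll("yaml", "Marshal", "Unmarshal") {
		fmt.Println("provides both:", importPath)
	}
}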
164
vendor/golang.org/x/tools/internal/modindex/modindex.go generated vendored Normal file
View file

@ -0,0 +1,164 @@
// Copyright 2024 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package modindex contains code for building and searching an index to
// the Go module cache. The directory containing the index, named by
// IndexDir, contains a file index-name-<ver> that holds the name
// of the current index. We believe writing that short file is atomic.
// ReadIndex reads that file to get the file name of the index.
// WriteIndex writes an index with a unique name and then
// writes that name into a new version of index-name-<ver>.
// (<ver> stands for the CurrentVersion of the index format.)
package modindex
import (
"path/filepath"
"slices"
"strings"
"time"
"golang.org/x/mod/semver"
)
// Create always creates a new index for the go module cache that is in cachedir.
func Create(cachedir string) error {
_, err := indexModCache(cachedir, true)
return err
}
// Update the index for the go module cache that is in cachedir,
// If there is no existing index it will build one.
// If there are changed directories since the last index, it will
// write a new one and return true. Otherwise it returns false.
func Update(cachedir string) (bool, error) {
return indexModCache(cachedir, false)
}
// indexModCache writes an index current as of when it is called.
// If clear is true the index is constructed from all of GOMODCACHE
// otherwise the index is constructed from the last previous index
// and the updates to the cache. It returns true if it wrote an index,
// false otherwise.
func indexModCache(cachedir string, clear bool) (bool, error) {
cachedir, err := filepath.Abs(cachedir)
if err != nil {
return false, err
}
cd := Abspath(cachedir)
future := time.Now().Add(24 * time.Hour) // safely in the future
ok, err := modindexTimed(future, cd, clear)
if err != nil {
return false, err
}
return ok, nil
}
// modindexTimed writes an index current as of onlyBefore.
// If clear is true the index is constructed from all of GOMODCACHE
// otherwise the index is constructed from the last previous index
// and all the updates to the cache before onlyBefore.
// It returns true if it wrote a new index, false if it wrote nothing.
func modindexTimed(onlyBefore time.Time, cachedir Abspath, clear bool) (bool, error) {
var curIndex *Index
if !clear {
var err error
curIndex, err = ReadIndex(string(cachedir))
if clear && err != nil {
return false, err
}
// TODO(pjw): check that most of those directories still exist
}
cfg := &work{
onlyBefore: onlyBefore,
oldIndex: curIndex,
cacheDir: cachedir,
}
if curIndex != nil {
cfg.onlyAfter = curIndex.Changed
}
if err := cfg.buildIndex(); err != nil {
return false, err
}
if len(cfg.newIndex.Entries) == 0 && curIndex != nil {
// no changes from existing curIndex, don't write a new index
return false, nil
}
if err := cfg.writeIndex(); err != nil {
return false, err
}
return true, nil
}
type work struct {
onlyBefore time.Time // do not use directories later than this
onlyAfter time.Time // only interested in directories after this
// directories from before onlyAfter come from oldIndex
oldIndex *Index
newIndex *Index
cacheDir Abspath
}
func (w *work) buildIndex() error {
// The effective date of the new index should be at least
// slightly earlier than when the directories are scanned
// so set it now.
w.newIndex = &Index{Changed: time.Now(), Cachedir: w.cacheDir}
dirs := findDirs(string(w.cacheDir), w.onlyAfter, w.onlyBefore)
if len(dirs) == 0 {
return nil
}
newdirs, err := byImportPath(dirs)
if err != nil {
return err
}
// Each import path might occur only in newdirs, only in w.oldIndex,
// or in both. If it occurs in both, use the semantically later one.
if w.oldIndex != nil {
for _, e := range w.oldIndex.Entries {
found, ok := newdirs[e.ImportPath]
if !ok {
w.newIndex.Entries = append(w.newIndex.Entries, e)
continue // use this one, there is no new one
}
if semver.Compare(found[0].version, e.Version) > 0 {
// use the new one
} else {
// use the old one, forget the new one
w.newIndex.Entries = append(w.newIndex.Entries, e)
delete(newdirs, e.ImportPath)
}
}
}
// get symbol information for all the new directories
getSymbols(w.cacheDir, newdirs)
// assemble the new index entries
for k, v := range newdirs {
d := v[0]
pkg, names := processSyms(d.syms)
if pkg == "" {
continue // PJW: does this ever happen?
}
entry := Entry{
PkgName: pkg,
Dir: d.path,
ImportPath: k,
Version: d.version,
Names: names,
}
w.newIndex.Entries = append(w.newIndex.Entries, entry)
}
// sort the entries in the new index
slices.SortFunc(w.newIndex.Entries, func(l, r Entry) int {
if n := strings.Compare(l.PkgName, r.PkgName); n != 0 {
return n
}
return strings.Compare(l.ImportPath, r.ImportPath)
})
return nil
}
func (w *work) writeIndex() error {
return writeIndex(w.cacheDir, w.newIndex)
}

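For context, driving the indexer from the two entry points above is a few lines. A hedged sketch (gomodcache is a hypothetical path; log assumed imported):

// Full rebuild, e.g. on first use or after corruption.
if err := modindex.Create(gomodcache); err != nil {
	log.Fatal(err)
}
// Incremental refresh; changed reports whether a new index was written.
changed, err := modindex.Update(gomodcache)
if err != nil {
	log.Fatal(err)
}
_ = changed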
218
vendor/golang.org/x/tools/internal/modindex/symbols.go generated vendored Normal file
View file

@ -0,0 +1,218 @@
// Copyright 2024 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package modindex
import (
"fmt"
"go/ast"
"go/parser"
"go/token"
"go/types"
"os"
"path/filepath"
"runtime"
"slices"
"strings"
"golang.org/x/sync/errgroup"
)
// The name of a symbol contains information about the symbol:
// <name> T for types, TD if the type is deprecated
// <name> C for consts, CD if the const is deprecated
// <name> V for vars, VD if the var is deprecated
// and for funcs: <name> F <num of return values> (<arg-name> <arg-type>)*
// any spaces in <arg-type> are replaced by $s so that the fields
// of the name are space separated. F is replaced by FD if the func
// is deprecated.
type symbol struct {
pkg string // name of the symbol's package
name string // declared name
kind string // T, C, V, or F, followed by D if deprecated
sig string // signature information, for F
}
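// Example (illustrative): the declaration
//
//	func Watch(ch chan struct{}) (n int, err error)
//
// is encoded as the symbol line
//
//	Watch F 2 ch chan$sstruct{}
//
// i.e. a func with two results and one formal parameter "ch" whose type
// "chan struct{}" has its internal space stored as $s.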
// find the symbols for the best directories
func getSymbols(cd Abspath, dirs map[string][]*directory) {
var g errgroup.Group
g.SetLimit(max(2, runtime.GOMAXPROCS(0)/2))
for _, vv := range dirs {
// throttling some day?
d := vv[0]
g.Go(func() error {
thedir := filepath.Join(string(cd), string(d.path))
mode := parser.SkipObjectResolution | parser.ParseComments
fi, err := os.ReadDir(thedir)
if err != nil {
return nil // log this someday?
}
for _, fx := range fi {
if !strings.HasSuffix(fx.Name(), ".go") || strings.HasSuffix(fx.Name(), "_test.go") {
continue
}
fname := filepath.Join(thedir, fx.Name())
tr, err := parser.ParseFile(token.NewFileSet(), fname, nil, mode)
if err != nil {
continue // ignore errors, someday log them?
}
d.syms = append(d.syms, getFileExports(tr)...)
}
return nil
})
}
g.Wait()
}
func getFileExports(f *ast.File) []symbol {
pkg := f.Name.Name
if pkg == "main" {
return nil
}
var ans []symbol
// should we look for //go:build ignore?
for _, decl := range f.Decls {
switch decl := decl.(type) {
case *ast.FuncDecl:
if decl.Recv != nil {
// ignore methods, as we are completing package selections
continue
}
name := decl.Name.Name
dtype := decl.Type
// not looking at dtype.TypeParams. That is, treating
// generic functions just like non-generic ones.
sig := dtype.Params
kind := "F"
if isDeprecated(decl.Doc) {
kind += "D"
}
result := []string{fmt.Sprintf("%d", dtype.Results.NumFields())}
for _, x := range sig.List {
// This code creates a string representing the type.
// TODO(pjw): it may be fragile:
// 1. x.Type could be nil, perhaps in ill-formed code
// 2. ExprString might someday change incompatibly to
// include struct tags, which can be arbitrary strings
if x.Type == nil {
// Can this happen without a parse error? (Files with parse
// errors are ignored in getSymbols)
continue // maybe report this someday
}
tp := types.ExprString(x.Type)
if len(tp) == 0 {
// Can this happen?
continue // maybe report this someday
}
// This is only safe if ExprString never returns anything with a $
// The only place a $ can occur seems to be in a struct tag, which
// can be an arbitrary string literal, and ExprString does not presently
// print struct tags. So for this to happen the type of a formal parameter
// has to be an explicit struct, e.g. foo(x struct{a int "$"}), and ExprString
// would have to show the struct tag. Even testing for this case seems
// a waste of effort, but let's remember the possibility
if strings.Contains(tp, "$") {
continue
}
tp = strings.Replace(tp, " ", "$", -1)
if len(x.Names) == 0 {
result = append(result, "_")
result = append(result, tp)
} else {
for _, y := range x.Names {
result = append(result, y.Name)
result = append(result, tp)
}
}
}
sigs := strings.Join(result, " ")
if s := newsym(pkg, name, kind, sigs); s != nil {
ans = append(ans, *s)
}
case *ast.GenDecl:
depr := isDeprecated(decl.Doc)
switch decl.Tok {
case token.CONST, token.VAR:
tp := "V"
if decl.Tok == token.CONST {
tp = "C"
}
if depr {
tp += "D"
}
for _, sp := range decl.Specs {
for _, x := range sp.(*ast.ValueSpec).Names {
if s := newsym(pkg, x.Name, tp, ""); s != nil {
ans = append(ans, *s)
}
}
}
case token.TYPE:
tp := "T"
if depr {
tp += "D"
}
for _, sp := range decl.Specs {
if s := newsym(pkg, sp.(*ast.TypeSpec).Name.Name, tp, ""); s != nil {
ans = append(ans, *s)
}
}
}
}
}
return ans
}
func newsym(pkg, name, kind, sig string) *symbol {
if len(name) == 0 || !ast.IsExported(name) {
return nil
}
sym := symbol{pkg: pkg, name: name, kind: kind, sig: sig}
return &sym
}
func isDeprecated(doc *ast.CommentGroup) bool {
if doc == nil {
return false
}
// go.dev/wiki/Deprecated Paragraph starting 'Deprecated:'
// This code fails for /* Deprecated: */, but it's the code from
// gopls/internal/analysis/deprecated
lines := strings.Split(doc.Text(), "\n\n")
for _, line := range lines {
if strings.HasPrefix(line, "Deprecated:") {
return true
}
}
return false
}
// processSyms returns the package name and the encoded lines for the symbols.
// If there are multiple packages, one is chosen arbitrarily.
// The returned slice is sorted lexicographically.
func processSyms(syms []symbol) (string, []string) {
if len(syms) == 0 {
return "", nil
}
slices.SortFunc(syms, func(l, r symbol) int {
return strings.Compare(l.name, r.name)
})
pkg := syms[0].pkg
var names []string
for _, s := range syms {
var nx string
if s.pkg == pkg {
if s.sig != "" {
nx = fmt.Sprintf("%s %s %s", s.name, s.kind, s.sig)
} else {
nx = fmt.Sprintf("%s %s", s.name, s.kind)
}
names = append(names, nx)
} else {
continue // PJW: do we want to keep track of these?
}
}
return pkg, names
}

25
vendor/golang.org/x/tools/internal/modindex/types.go generated vendored Normal file
View file

@ -0,0 +1,25 @@
// Copyright 2024 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package modindex
import (
"strings"
)
// Some special types to distinguish the various kinds of directory names;
// it's easy to get them confused.
type Abspath string // absolute paths
type Relpath string // paths with GOMODCACHE prefix removed
func toRelpath(cachedir Abspath, s string) Relpath {
if strings.HasPrefix(s, string(cachedir)) {
if s == string(cachedir) {
return Relpath("")
}
return Relpath(s[len(cachedir)+1:])
}
return Relpath(s)
}

View file

@ -268,6 +268,8 @@ var PackageSymbols = map[string][]Symbol{
{"ErrTooLarge", Var, 0},
{"Fields", Func, 0},
{"FieldsFunc", Func, 0},
{"FieldsFuncSeq", Func, 24},
{"FieldsSeq", Func, 24},
{"HasPrefix", Func, 0},
{"HasSuffix", Func, 0},
{"Index", Func, 0},
@ -280,6 +282,7 @@ var PackageSymbols = map[string][]Symbol{
{"LastIndexAny", Func, 0},
{"LastIndexByte", Func, 5},
{"LastIndexFunc", Func, 0},
{"Lines", Func, 24},
{"Map", Func, 0},
{"MinRead", Const, 0},
{"NewBuffer", Func, 0},
@ -293,7 +296,9 @@ var PackageSymbols = map[string][]Symbol{
{"Split", Func, 0},
{"SplitAfter", Func, 0},
{"SplitAfterN", Func, 0},
{"SplitAfterSeq", Func, 24},
{"SplitN", Func, 0},
{"SplitSeq", Func, 24},
{"Title", Func, 0},
{"ToLower", Func, 0},
{"ToLowerSpecial", Func, 0},
@ -535,6 +540,7 @@ var PackageSymbols = map[string][]Symbol{
{"NewCTR", Func, 0},
{"NewGCM", Func, 2},
{"NewGCMWithNonceSize", Func, 5},
{"NewGCMWithRandomNonce", Func, 24},
{"NewGCMWithTagSize", Func, 11},
{"NewOFB", Func, 0},
{"Stream", Type, 0},
@ -673,6 +679,14 @@ var PackageSymbols = map[string][]Symbol{
{"Unmarshal", Func, 0},
{"UnmarshalCompressed", Func, 15},
},
"crypto/fips140": {
{"Enabled", Func, 24},
},
"crypto/hkdf": {
{"Expand", Func, 24},
{"Extract", Func, 24},
{"Key", Func, 24},
},
"crypto/hmac": {
{"Equal", Func, 1},
{"New", Func, 0},
@ -683,11 +697,43 @@ var PackageSymbols = map[string][]Symbol{
{"Size", Const, 0},
{"Sum", Func, 2},
},
"crypto/mlkem": {
{"(*DecapsulationKey1024).Bytes", Method, 24},
{"(*DecapsulationKey1024).Decapsulate", Method, 24},
{"(*DecapsulationKey1024).EncapsulationKey", Method, 24},
{"(*DecapsulationKey768).Bytes", Method, 24},
{"(*DecapsulationKey768).Decapsulate", Method, 24},
{"(*DecapsulationKey768).EncapsulationKey", Method, 24},
{"(*EncapsulationKey1024).Bytes", Method, 24},
{"(*EncapsulationKey1024).Encapsulate", Method, 24},
{"(*EncapsulationKey768).Bytes", Method, 24},
{"(*EncapsulationKey768).Encapsulate", Method, 24},
{"CiphertextSize1024", Const, 24},
{"CiphertextSize768", Const, 24},
{"DecapsulationKey1024", Type, 24},
{"DecapsulationKey768", Type, 24},
{"EncapsulationKey1024", Type, 24},
{"EncapsulationKey768", Type, 24},
{"EncapsulationKeySize1024", Const, 24},
{"EncapsulationKeySize768", Const, 24},
{"GenerateKey1024", Func, 24},
{"GenerateKey768", Func, 24},
{"NewDecapsulationKey1024", Func, 24},
{"NewDecapsulationKey768", Func, 24},
{"NewEncapsulationKey1024", Func, 24},
{"NewEncapsulationKey768", Func, 24},
{"SeedSize", Const, 24},
{"SharedKeySize", Const, 24},
},
"crypto/pbkdf2": {
{"Key", Func, 24},
},
"crypto/rand": {
{"Int", Func, 0},
{"Prime", Func, 0},
{"Read", Func, 0},
{"Reader", Var, 0},
{"Text", Func, 24},
},
"crypto/rc4": {
{"(*Cipher).Reset", Method, 0},
@ -766,6 +812,39 @@ var PackageSymbols = map[string][]Symbol{
{"Sum224", Func, 2},
{"Sum256", Func, 2},
},
"crypto/sha3": {
{"(*SHA3).AppendBinary", Method, 24},
{"(*SHA3).BlockSize", Method, 24},
{"(*SHA3).MarshalBinary", Method, 24},
{"(*SHA3).Reset", Method, 24},
{"(*SHA3).Size", Method, 24},
{"(*SHA3).Sum", Method, 24},
{"(*SHA3).UnmarshalBinary", Method, 24},
{"(*SHA3).Write", Method, 24},
{"(*SHAKE).AppendBinary", Method, 24},
{"(*SHAKE).BlockSize", Method, 24},
{"(*SHAKE).MarshalBinary", Method, 24},
{"(*SHAKE).Read", Method, 24},
{"(*SHAKE).Reset", Method, 24},
{"(*SHAKE).UnmarshalBinary", Method, 24},
{"(*SHAKE).Write", Method, 24},
{"New224", Func, 24},
{"New256", Func, 24},
{"New384", Func, 24},
{"New512", Func, 24},
{"NewCSHAKE128", Func, 24},
{"NewCSHAKE256", Func, 24},
{"NewSHAKE128", Func, 24},
{"NewSHAKE256", Func, 24},
{"SHA3", Type, 24},
{"SHAKE", Type, 24},
{"Sum224", Func, 24},
{"Sum256", Func, 24},
{"Sum384", Func, 24},
{"Sum512", Func, 24},
{"SumSHAKE128", Func, 24},
{"SumSHAKE256", Func, 24},
},
"crypto/sha512": {
{"BlockSize", Const, 0},
{"New", Func, 0},
@ -788,6 +867,7 @@ var PackageSymbols = map[string][]Symbol{
{"ConstantTimeEq", Func, 0},
{"ConstantTimeLessOrEq", Func, 2},
{"ConstantTimeSelect", Func, 0},
{"WithDataIndependentTiming", Func, 24},
{"XORBytes", Func, 20},
},
"crypto/tls": {
@ -864,6 +944,7 @@ var PackageSymbols = map[string][]Symbol{
{"ClientHelloInfo", Type, 4},
{"ClientHelloInfo.CipherSuites", Field, 4},
{"ClientHelloInfo.Conn", Field, 8},
{"ClientHelloInfo.Extensions", Field, 24},
{"ClientHelloInfo.ServerName", Field, 4},
{"ClientHelloInfo.SignatureSchemes", Field, 8},
{"ClientHelloInfo.SupportedCurves", Field, 4},
@ -881,6 +962,7 @@ var PackageSymbols = map[string][]Symbol{
{"Config.CurvePreferences", Field, 3},
{"Config.DynamicRecordSizingDisabled", Field, 7},
{"Config.EncryptedClientHelloConfigList", Field, 23},
{"Config.EncryptedClientHelloKeys", Field, 24},
{"Config.EncryptedClientHelloRejectionVerify", Field, 23},
{"Config.GetCertificate", Field, 4},
{"Config.GetClientCertificate", Field, 8},
@ -934,6 +1016,10 @@ var PackageSymbols = map[string][]Symbol{
{"ECHRejectionError", Type, 23},
{"ECHRejectionError.RetryConfigList", Field, 23},
{"Ed25519", Const, 13},
{"EncryptedClientHelloKey", Type, 24},
{"EncryptedClientHelloKey.Config", Field, 24},
{"EncryptedClientHelloKey.PrivateKey", Field, 24},
{"EncryptedClientHelloKey.SendAsRetry", Field, 24},
{"InsecureCipherSuites", Func, 14},
{"Listen", Func, 0},
{"LoadX509KeyPair", Func, 0},
@ -1032,6 +1118,7 @@ var PackageSymbols = map[string][]Symbol{
{"VersionTLS12", Const, 2},
{"VersionTLS13", Const, 12},
{"X25519", Const, 8},
{"X25519MLKEM768", Const, 24},
{"X509KeyPair", Func, 0},
},
"crypto/x509": {
@ -1056,6 +1143,8 @@ var PackageSymbols = map[string][]Symbol{
{"(ConstraintViolationError).Error", Method, 0},
{"(HostnameError).Error", Method, 0},
{"(InsecureAlgorithmError).Error", Method, 6},
{"(OID).AppendBinary", Method, 24},
{"(OID).AppendText", Method, 24},
{"(OID).Equal", Method, 22},
{"(OID).EqualASN1OID", Method, 22},
{"(OID).MarshalBinary", Method, 23},
@ -1084,6 +1173,10 @@ var PackageSymbols = map[string][]Symbol{
{"Certificate.Extensions", Field, 2},
{"Certificate.ExtraExtensions", Field, 2},
{"Certificate.IPAddresses", Field, 1},
{"Certificate.InhibitAnyPolicy", Field, 24},
{"Certificate.InhibitAnyPolicyZero", Field, 24},
{"Certificate.InhibitPolicyMapping", Field, 24},
{"Certificate.InhibitPolicyMappingZero", Field, 24},
{"Certificate.IsCA", Field, 0},
{"Certificate.Issuer", Field, 0},
{"Certificate.IssuingCertificateURL", Field, 2},
@ -1100,6 +1193,7 @@ var PackageSymbols = map[string][]Symbol{
{"Certificate.PermittedURIDomains", Field, 10},
{"Certificate.Policies", Field, 22},
{"Certificate.PolicyIdentifiers", Field, 0},
{"Certificate.PolicyMappings", Field, 24},
{"Certificate.PublicKey", Field, 0},
{"Certificate.PublicKeyAlgorithm", Field, 0},
{"Certificate.Raw", Field, 0},
@ -1107,6 +1201,8 @@ var PackageSymbols = map[string][]Symbol{
{"Certificate.RawSubject", Field, 0},
{"Certificate.RawSubjectPublicKeyInfo", Field, 0},
{"Certificate.RawTBSCertificate", Field, 0},
{"Certificate.RequireExplicitPolicy", Field, 24},
{"Certificate.RequireExplicitPolicyZero", Field, 24},
{"Certificate.SerialNumber", Field, 0},
{"Certificate.Signature", Field, 0},
{"Certificate.SignatureAlgorithm", Field, 0},
@ -1198,6 +1294,7 @@ var PackageSymbols = map[string][]Symbol{
{"NameConstraintsWithoutSANs", Const, 10},
{"NameMismatch", Const, 8},
{"NewCertPool", Func, 0},
{"NoValidChains", Const, 24},
{"NotAuthorizedToSign", Const, 0},
{"OID", Type, 22},
{"OIDFromInts", Func, 22},
@ -1219,6 +1316,9 @@ var PackageSymbols = map[string][]Symbol{
{"ParsePKCS8PrivateKey", Func, 0},
{"ParsePKIXPublicKey", Func, 0},
{"ParseRevocationList", Func, 19},
{"PolicyMapping", Type, 24},
{"PolicyMapping.IssuerDomainPolicy", Field, 24},
{"PolicyMapping.SubjectDomainPolicy", Field, 24},
{"PublicKeyAlgorithm", Type, 0},
{"PureEd25519", Const, 13},
{"RSA", Const, 0},
@ -1265,6 +1365,7 @@ var PackageSymbols = map[string][]Symbol{
{"UnknownPublicKeyAlgorithm", Const, 0},
{"UnknownSignatureAlgorithm", Const, 0},
{"VerifyOptions", Type, 0},
{"VerifyOptions.CertificatePolicies", Field, 24},
{"VerifyOptions.CurrentTime", Field, 0},
{"VerifyOptions.DNSName", Field, 0},
{"VerifyOptions.Intermediates", Field, 0},
@ -1975,6 +2076,8 @@ var PackageSymbols = map[string][]Symbol{
{"(*File).DynString", Method, 1},
{"(*File).DynValue", Method, 21},
{"(*File).DynamicSymbols", Method, 4},
{"(*File).DynamicVersionNeeds", Method, 24},
{"(*File).DynamicVersions", Method, 24},
{"(*File).ImportedLibraries", Method, 0},
{"(*File).ImportedSymbols", Method, 0},
{"(*File).Section", Method, 0},
@ -2240,6 +2343,19 @@ var PackageSymbols = map[string][]Symbol{
{"DynFlag", Type, 0},
{"DynFlag1", Type, 21},
{"DynTag", Type, 0},
{"DynamicVersion", Type, 24},
{"DynamicVersion.Deps", Field, 24},
{"DynamicVersion.Flags", Field, 24},
{"DynamicVersion.Index", Field, 24},
{"DynamicVersion.Name", Field, 24},
{"DynamicVersionDep", Type, 24},
{"DynamicVersionDep.Dep", Field, 24},
{"DynamicVersionDep.Flags", Field, 24},
{"DynamicVersionDep.Index", Field, 24},
{"DynamicVersionFlag", Type, 24},
{"DynamicVersionNeed", Type, 24},
{"DynamicVersionNeed.Name", Field, 24},
{"DynamicVersionNeed.Needs", Field, 24},
{"EI_ABIVERSION", Const, 0},
{"EI_CLASS", Const, 0},
{"EI_DATA", Const, 0},
@ -3726,8 +3842,19 @@ var PackageSymbols = map[string][]Symbol{
{"Symbol.Size", Field, 0},
{"Symbol.Value", Field, 0},
{"Symbol.Version", Field, 13},
{"Symbol.VersionIndex", Field, 24},
{"Symbol.VersionScope", Field, 24},
{"SymbolVersionScope", Type, 24},
{"Type", Type, 0},
{"VER_FLG_BASE", Const, 24},
{"VER_FLG_INFO", Const, 24},
{"VER_FLG_WEAK", Const, 24},
{"Version", Type, 0},
{"VersionScopeGlobal", Const, 24},
{"VersionScopeHidden", Const, 24},
{"VersionScopeLocal", Const, 24},
{"VersionScopeNone", Const, 24},
{"VersionScopeSpecific", Const, 24},
},
"debug/gosym": {
{"(*DecodingError).Error", Method, 0},
@ -4453,8 +4580,10 @@ var PackageSymbols = map[string][]Symbol{
{"FS", Type, 16},
},
"encoding": {
{"BinaryAppender", Type, 24},
{"BinaryMarshaler", Type, 2},
{"BinaryUnmarshaler", Type, 2},
{"TextAppender", Type, 24},
{"TextMarshaler", Type, 2},
{"TextUnmarshaler", Type, 2},
},
@ -5984,13 +6113,16 @@ var PackageSymbols = map[string][]Symbol{
{"(*Interface).Complete", Method, 5},
{"(*Interface).Embedded", Method, 5},
{"(*Interface).EmbeddedType", Method, 11},
{"(*Interface).EmbeddedTypes", Method, 24},
{"(*Interface).Empty", Method, 5},
{"(*Interface).ExplicitMethod", Method, 5},
{"(*Interface).ExplicitMethods", Method, 24},
{"(*Interface).IsComparable", Method, 18},
{"(*Interface).IsImplicit", Method, 18},
{"(*Interface).IsMethodSet", Method, 18},
{"(*Interface).MarkImplicit", Method, 18},
{"(*Interface).Method", Method, 5},
{"(*Interface).Methods", Method, 24},
{"(*Interface).NumEmbeddeds", Method, 5},
{"(*Interface).NumExplicitMethods", Method, 5},
{"(*Interface).NumMethods", Method, 5},
@ -6011,9 +6143,11 @@ var PackageSymbols = map[string][]Symbol{
{"(*MethodSet).At", Method, 5},
{"(*MethodSet).Len", Method, 5},
{"(*MethodSet).Lookup", Method, 5},
{"(*MethodSet).Methods", Method, 24},
{"(*MethodSet).String", Method, 5},
{"(*Named).AddMethod", Method, 5},
{"(*Named).Method", Method, 5},
{"(*Named).Methods", Method, 24},
{"(*Named).NumMethods", Method, 5},
{"(*Named).Obj", Method, 5},
{"(*Named).Origin", Method, 18},
@ -6054,6 +6188,7 @@ var PackageSymbols = map[string][]Symbol{
{"(*Pointer).String", Method, 5},
{"(*Pointer).Underlying", Method, 5},
{"(*Scope).Child", Method, 5},
{"(*Scope).Children", Method, 24},
{"(*Scope).Contains", Method, 5},
{"(*Scope).End", Method, 5},
{"(*Scope).Innermost", Method, 5},
@ -6089,6 +6224,7 @@ var PackageSymbols = map[string][]Symbol{
{"(*StdSizes).Offsetsof", Method, 5},
{"(*StdSizes).Sizeof", Method, 5},
{"(*Struct).Field", Method, 5},
{"(*Struct).Fields", Method, 24},
{"(*Struct).NumFields", Method, 5},
{"(*Struct).String", Method, 5},
{"(*Struct).Tag", Method, 5},
@ -6100,8 +6236,10 @@ var PackageSymbols = map[string][]Symbol{
{"(*Tuple).Len", Method, 5},
{"(*Tuple).String", Method, 5},
{"(*Tuple).Underlying", Method, 5},
{"(*Tuple).Variables", Method, 24},
{"(*TypeList).At", Method, 18},
{"(*TypeList).Len", Method, 18},
{"(*TypeList).Types", Method, 24},
{"(*TypeName).Exported", Method, 5},
{"(*TypeName).Id", Method, 5},
{"(*TypeName).IsAlias", Method, 9},
@ -6119,9 +6257,11 @@ var PackageSymbols = map[string][]Symbol{
{"(*TypeParam).Underlying", Method, 18},
{"(*TypeParamList).At", Method, 18},
{"(*TypeParamList).Len", Method, 18},
{"(*TypeParamList).TypeParams", Method, 24},
{"(*Union).Len", Method, 18},
{"(*Union).String", Method, 18},
{"(*Union).Term", Method, 18},
{"(*Union).Terms", Method, 24},
{"(*Union).Underlying", Method, 18},
{"(*Var).Anonymous", Method, 5},
{"(*Var).Embedded", Method, 11},
@ -6392,10 +6532,12 @@ var PackageSymbols = map[string][]Symbol{
{"(*Hash).WriteByte", Method, 14},
{"(*Hash).WriteString", Method, 14},
{"Bytes", Func, 19},
{"Comparable", Func, 24},
{"Hash", Type, 14},
{"MakeSeed", Func, 14},
{"Seed", Type, 14},
{"String", Func, 19},
{"WriteComparable", Func, 24},
},
"html": {
{"EscapeString", Func, 0},
@ -7082,6 +7224,7 @@ var PackageSymbols = map[string][]Symbol{
{"(*JSONHandler).WithGroup", Method, 21},
{"(*Level).UnmarshalJSON", Method, 21},
{"(*Level).UnmarshalText", Method, 21},
{"(*LevelVar).AppendText", Method, 24},
{"(*LevelVar).Level", Method, 21},
{"(*LevelVar).MarshalText", Method, 21},
{"(*LevelVar).Set", Method, 21},
@ -7110,6 +7253,7 @@ var PackageSymbols = map[string][]Symbol{
{"(Attr).Equal", Method, 21},
{"(Attr).String", Method, 21},
{"(Kind).String", Method, 21},
{"(Level).AppendText", Method, 24},
{"(Level).Level", Method, 21},
{"(Level).MarshalJSON", Method, 21},
{"(Level).MarshalText", Method, 21},
@ -7140,6 +7284,7 @@ var PackageSymbols = map[string][]Symbol{
{"Debug", Func, 21},
{"DebugContext", Func, 21},
{"Default", Func, 21},
{"DiscardHandler", Var, 24},
{"Duration", Func, 21},
{"DurationValue", Func, 21},
{"Error", Func, 21},
@ -7375,6 +7520,7 @@ var PackageSymbols = map[string][]Symbol{
{"(*Float).Acc", Method, 5},
{"(*Float).Add", Method, 5},
{"(*Float).Append", Method, 5},
{"(*Float).AppendText", Method, 24},
{"(*Float).Cmp", Method, 5},
{"(*Float).Copy", Method, 5},
{"(*Float).Float32", Method, 5},
@ -7421,6 +7567,7 @@ var PackageSymbols = map[string][]Symbol{
{"(*Int).And", Method, 0},
{"(*Int).AndNot", Method, 0},
{"(*Int).Append", Method, 6},
{"(*Int).AppendText", Method, 24},
{"(*Int).Binomial", Method, 0},
{"(*Int).Bit", Method, 0},
{"(*Int).BitLen", Method, 0},
@ -7477,6 +7624,7 @@ var PackageSymbols = map[string][]Symbol{
{"(*Int).Xor", Method, 0},
{"(*Rat).Abs", Method, 0},
{"(*Rat).Add", Method, 0},
{"(*Rat).AppendText", Method, 24},
{"(*Rat).Cmp", Method, 0},
{"(*Rat).Denom", Method, 0},
{"(*Rat).Float32", Method, 4},
@ -7659,11 +7807,13 @@ var PackageSymbols = map[string][]Symbol{
{"Zipf", Type, 0},
},
"math/rand/v2": {
{"(*ChaCha8).AppendBinary", Method, 24},
{"(*ChaCha8).MarshalBinary", Method, 22},
{"(*ChaCha8).Read", Method, 23},
{"(*ChaCha8).Seed", Method, 22},
{"(*ChaCha8).Uint64", Method, 22},
{"(*ChaCha8).UnmarshalBinary", Method, 22},
{"(*PCG).AppendBinary", Method, 24},
{"(*PCG).MarshalBinary", Method, 22},
{"(*PCG).Seed", Method, 22},
{"(*PCG).Uint64", Method, 22},
@ -7931,6 +8081,7 @@ var PackageSymbols = map[string][]Symbol{
{"(*UnixListener).SyscallConn", Method, 10},
{"(Flags).String", Method, 0},
{"(HardwareAddr).String", Method, 0},
{"(IP).AppendText", Method, 24},
{"(IP).DefaultMask", Method, 0},
{"(IP).Equal", Method, 0},
{"(IP).IsGlobalUnicast", Method, 0},
@ -8131,6 +8282,9 @@ var PackageSymbols = map[string][]Symbol{
{"(*MaxBytesError).Error", Method, 19},
{"(*ProtocolError).Error", Method, 0},
{"(*ProtocolError).Is", Method, 21},
{"(*Protocols).SetHTTP1", Method, 24},
{"(*Protocols).SetHTTP2", Method, 24},
{"(*Protocols).SetUnencryptedHTTP2", Method, 24},
{"(*Request).AddCookie", Method, 0},
{"(*Request).BasicAuth", Method, 4},
{"(*Request).Clone", Method, 13},
@ -8190,6 +8344,10 @@ var PackageSymbols = map[string][]Symbol{
{"(Header).Values", Method, 14},
{"(Header).Write", Method, 0},
{"(Header).WriteSubset", Method, 0},
{"(Protocols).HTTP1", Method, 24},
{"(Protocols).HTTP2", Method, 24},
{"(Protocols).String", Method, 24},
{"(Protocols).UnencryptedHTTP2", Method, 24},
{"AllowQuerySemicolons", Func, 17},
{"CanonicalHeaderKey", Func, 0},
{"Client", Type, 0},
@ -8252,6 +8410,18 @@ var PackageSymbols = map[string][]Symbol{
{"FileSystem", Type, 0},
{"Flusher", Type, 0},
{"Get", Func, 0},
{"HTTP2Config", Type, 24},
{"HTTP2Config.CountError", Field, 24},
{"HTTP2Config.MaxConcurrentStreams", Field, 24},
{"HTTP2Config.MaxDecoderHeaderTableSize", Field, 24},
{"HTTP2Config.MaxEncoderHeaderTableSize", Field, 24},
{"HTTP2Config.MaxReadFrameSize", Field, 24},
{"HTTP2Config.MaxReceiveBufferPerConnection", Field, 24},
{"HTTP2Config.MaxReceiveBufferPerStream", Field, 24},
{"HTTP2Config.PermitProhibitedCipherSuites", Field, 24},
{"HTTP2Config.PingTimeout", Field, 24},
{"HTTP2Config.SendPingTimeout", Field, 24},
{"HTTP2Config.WriteByteTimeout", Field, 24},
{"Handle", Func, 0},
{"HandleFunc", Func, 0},
{"Handler", Type, 0},
@ -8292,6 +8462,7 @@ var PackageSymbols = map[string][]Symbol{
{"PostForm", Func, 0},
{"ProtocolError", Type, 0},
{"ProtocolError.ErrorString", Field, 0},
{"Protocols", Type, 24},
{"ProxyFromEnvironment", Func, 0},
{"ProxyURL", Func, 0},
{"PushOptions", Type, 8},
@ -8361,9 +8532,11 @@ var PackageSymbols = map[string][]Symbol{
{"Server.ConnState", Field, 3},
{"Server.DisableGeneralOptionsHandler", Field, 20},
{"Server.ErrorLog", Field, 3},
{"Server.HTTP2", Field, 24},
{"Server.Handler", Field, 0},
{"Server.IdleTimeout", Field, 8},
{"Server.MaxHeaderBytes", Field, 0},
{"Server.Protocols", Field, 24},
{"Server.ReadHeaderTimeout", Field, 8},
{"Server.ReadTimeout", Field, 0},
{"Server.TLSConfig", Field, 0},
@ -8453,12 +8626,14 @@ var PackageSymbols = map[string][]Symbol{
{"Transport.ExpectContinueTimeout", Field, 6},
{"Transport.ForceAttemptHTTP2", Field, 13},
{"Transport.GetProxyConnectHeader", Field, 16},
{"Transport.HTTP2", Field, 24},
{"Transport.IdleConnTimeout", Field, 7},
{"Transport.MaxConnsPerHost", Field, 11},
{"Transport.MaxIdleConns", Field, 7},
{"Transport.MaxIdleConnsPerHost", Field, 0},
{"Transport.MaxResponseHeaderBytes", Field, 7},
{"Transport.OnProxyConnectResponse", Field, 20},
{"Transport.Protocols", Field, 24},
{"Transport.Proxy", Field, 0},
{"Transport.ProxyConnectHeader", Field, 8},
{"Transport.ReadBufferSize", Field, 13},
@ -8646,6 +8821,8 @@ var PackageSymbols = map[string][]Symbol{
{"(*AddrPort).UnmarshalText", Method, 18},
{"(*Prefix).UnmarshalBinary", Method, 18},
{"(*Prefix).UnmarshalText", Method, 18},
{"(Addr).AppendBinary", Method, 24},
{"(Addr).AppendText", Method, 24},
{"(Addr).AppendTo", Method, 18},
{"(Addr).As16", Method, 18},
{"(Addr).As4", Method, 18},
@ -8676,6 +8853,8 @@ var PackageSymbols = map[string][]Symbol{
{"(Addr).WithZone", Method, 18},
{"(Addr).Zone", Method, 18},
{"(AddrPort).Addr", Method, 18},
{"(AddrPort).AppendBinary", Method, 24},
{"(AddrPort).AppendText", Method, 24},
{"(AddrPort).AppendTo", Method, 18},
{"(AddrPort).Compare", Method, 22},
{"(AddrPort).IsValid", Method, 18},
@ -8684,6 +8863,8 @@ var PackageSymbols = map[string][]Symbol{
{"(AddrPort).Port", Method, 18},
{"(AddrPort).String", Method, 18},
{"(Prefix).Addr", Method, 18},
{"(Prefix).AppendBinary", Method, 24},
{"(Prefix).AppendText", Method, 24},
{"(Prefix).AppendTo", Method, 18},
{"(Prefix).Bits", Method, 18},
{"(Prefix).Contains", Method, 18},
@ -8868,6 +9049,7 @@ var PackageSymbols = map[string][]Symbol{
{"(*Error).Temporary", Method, 6},
{"(*Error).Timeout", Method, 6},
{"(*Error).Unwrap", Method, 13},
{"(*URL).AppendBinary", Method, 24},
{"(*URL).EscapedFragment", Method, 15},
{"(*URL).EscapedPath", Method, 5},
{"(*URL).Hostname", Method, 8},
@ -8967,6 +9149,17 @@ var PackageSymbols = map[string][]Symbol{
{"(*ProcessState).SysUsage", Method, 0},
{"(*ProcessState).SystemTime", Method, 0},
{"(*ProcessState).UserTime", Method, 0},
{"(*Root).Close", Method, 24},
{"(*Root).Create", Method, 24},
{"(*Root).FS", Method, 24},
{"(*Root).Lstat", Method, 24},
{"(*Root).Mkdir", Method, 24},
{"(*Root).Name", Method, 24},
{"(*Root).Open", Method, 24},
{"(*Root).OpenFile", Method, 24},
{"(*Root).OpenRoot", Method, 24},
{"(*Root).Remove", Method, 24},
{"(*Root).Stat", Method, 24},
{"(*SyscallError).Error", Method, 0},
{"(*SyscallError).Timeout", Method, 10},
{"(*SyscallError).Unwrap", Method, 13},
@ -9060,6 +9253,8 @@ var PackageSymbols = map[string][]Symbol{
{"O_WRONLY", Const, 0},
{"Open", Func, 0},
{"OpenFile", Func, 0},
{"OpenInRoot", Func, 24},
{"OpenRoot", Func, 24},
{"PathError", Type, 0},
{"PathError.Err", Field, 0},
{"PathError.Op", Field, 0},
@ -9081,6 +9276,7 @@ var PackageSymbols = map[string][]Symbol{
{"Remove", Func, 0},
{"RemoveAll", Func, 0},
{"Rename", Func, 0},
{"Root", Type, 24},
{"SEEK_CUR", Const, 0},
{"SEEK_END", Const, 0},
{"SEEK_SET", Const, 0},
@ -9422,6 +9618,7 @@ var PackageSymbols = map[string][]Symbol{
{"Zero", Func, 0},
},
"regexp": {
{"(*Regexp).AppendText", Method, 24},
{"(*Regexp).Copy", Method, 6},
{"(*Regexp).Expand", Method, 0},
{"(*Regexp).ExpandString", Method, 0},
@ -9602,6 +9799,8 @@ var PackageSymbols = map[string][]Symbol{
{"(*StackRecord).Stack", Method, 0},
{"(*TypeAssertionError).Error", Method, 0},
{"(*TypeAssertionError).RuntimeError", Method, 0},
{"(Cleanup).Stop", Method, 24},
{"AddCleanup", Func, 24},
{"BlockProfile", Func, 1},
{"BlockProfileRecord", Type, 1},
{"BlockProfileRecord.Count", Field, 1},
@ -9612,6 +9811,7 @@ var PackageSymbols = map[string][]Symbol{
{"Caller", Func, 0},
{"Callers", Func, 0},
{"CallersFrames", Func, 7},
{"Cleanup", Type, 24},
{"Compiler", Const, 0},
{"Error", Type, 0},
{"Frame", Type, 7},
@ -9974,6 +10174,8 @@ var PackageSymbols = map[string][]Symbol{
{"EqualFold", Func, 0},
{"Fields", Func, 0},
{"FieldsFunc", Func, 0},
{"FieldsFuncSeq", Func, 24},
{"FieldsSeq", Func, 24},
{"HasPrefix", Func, 0},
{"HasSuffix", Func, 0},
{"Index", Func, 0},
@ -9986,6 +10188,7 @@ var PackageSymbols = map[string][]Symbol{
{"LastIndexAny", Func, 0},
{"LastIndexByte", Func, 5},
{"LastIndexFunc", Func, 0},
{"Lines", Func, 24},
{"Map", Func, 0},
{"NewReader", Func, 0},
{"NewReplacer", Func, 0},
@ -9997,7 +10200,9 @@ var PackageSymbols = map[string][]Symbol{
{"Split", Func, 0},
{"SplitAfter", Func, 0},
{"SplitAfterN", Func, 0},
{"SplitAfterSeq", Func, 24},
{"SplitN", Func, 0},
{"SplitSeq", Func, 24},
{"Title", Func, 0},
{"ToLower", Func, 0},
{"ToLowerSpecial", Func, 0},
@ -16413,7 +16618,9 @@ var PackageSymbols = map[string][]Symbol{
{"ValueOf", Func, 0},
},
"testing": {
{"(*B).Chdir", Method, 24},
{"(*B).Cleanup", Method, 14},
{"(*B).Context", Method, 24},
{"(*B).Elapsed", Method, 20},
{"(*B).Error", Method, 0},
{"(*B).Errorf", Method, 0},
@ -16425,6 +16632,7 @@ var PackageSymbols = map[string][]Symbol{
{"(*B).Helper", Method, 9},
{"(*B).Log", Method, 0},
{"(*B).Logf", Method, 0},
{"(*B).Loop", Method, 24},
{"(*B).Name", Method, 8},
{"(*B).ReportAllocs", Method, 1},
{"(*B).ReportMetric", Method, 13},
@ -16442,7 +16650,9 @@ var PackageSymbols = map[string][]Symbol{
{"(*B).StopTimer", Method, 0},
{"(*B).TempDir", Method, 15},
{"(*F).Add", Method, 18},
{"(*F).Chdir", Method, 24},
{"(*F).Cleanup", Method, 18},
{"(*F).Context", Method, 24},
{"(*F).Error", Method, 18},
{"(*F).Errorf", Method, 18},
{"(*F).Fail", Method, 18},
@ -16463,7 +16673,9 @@ var PackageSymbols = map[string][]Symbol{
{"(*F).TempDir", Method, 18},
{"(*M).Run", Method, 4},
{"(*PB).Next", Method, 3},
{"(*T).Chdir", Method, 24},
{"(*T).Cleanup", Method, 14},
{"(*T).Context", Method, 24},
{"(*T).Deadline", Method, 15},
{"(*T).Error", Method, 0},
{"(*T).Errorf", Method, 0},
@ -16954,7 +17166,9 @@ var PackageSymbols = map[string][]Symbol{
{"(Time).Add", Method, 0},
{"(Time).AddDate", Method, 0},
{"(Time).After", Method, 0},
{"(Time).AppendBinary", Method, 24},
{"(Time).AppendFormat", Method, 5},
{"(Time).AppendText", Method, 24},
{"(Time).Before", Method, 0},
{"(Time).Clock", Method, 0},
{"(Time).Compare", Method, 20},
@ -17428,4 +17642,9 @@ var PackageSymbols = map[string][]Symbol{
{"String", Func, 0},
{"StringData", Func, 0},
},
"weak": {
{"(Pointer).Value", Method, 24},
{"Make", Func, 24},
{"Pointer", Type, 24},
},
}