diff --git a/VERSION b/VERSION index 982968de533cfc..e438c586cadc3e 100644 --- a/VERSION +++ b/VERSION @@ -1,2 +1,2 @@ -go1.25rc2 -time 2025-07-02T21:52:16Z +go1.25.5-kb +time 2025-12-18T10:09:51Z diff --git a/doc/go_spec.html b/doc/go_spec.html index 183bc7fb372755..2b47fb2e834d83 100644 --- a/doc/go_spec.html +++ b/doc/go_spec.html @@ -1,6 +1,6 @@ diff --git a/doc/godebug.md b/doc/godebug.md index aaa0f9dd55e570..c12ce5311d90d1 100644 --- a/doc/godebug.md +++ b/doc/godebug.md @@ -153,6 +153,16 @@ for example, see the [runtime documentation](/pkg/runtime#hdr-Environment_Variables) and the [go command documentation](/cmd/go#hdr-Build_and_test_caching). +### Go 1.26 + +Go 1.26 added a new `httpcookiemaxnum` setting that controls the maximum number +of cookies that net/http will accept when parsing HTTP headers. If the number of +cookie in a header exceeds the number set in `httpcookiemaxnum`, cookie parsing +will fail early. The default value is `httpcookiemaxnum=3000`. Setting +`httpcookiemaxnum=0` will allow the cookie parsing to accept an indefinite +number of cookies. To avoid denial of service attacks, this setting and default +was backported to Go 1.25.2 and Go 1.24.8. + ### Go 1.25 Go 1.25 added a new `decoratemappings` setting that controls whether the Go diff --git a/lib/fips140/fips140.sum b/lib/fips140/fips140.sum index 66b1e23dfe619d..703d1dc60e58ab 100644 --- a/lib/fips140/fips140.sum +++ b/lib/fips140/fips140.sum @@ -9,4 +9,4 @@ # # go test cmd/go/internal/fips140 -update # -v1.0.0.zip b50508feaeff05d22516b21e1fd210bbf5d6a1e422eaf2cfa23fe379342713b8 +v1.0.0-c2097c7c.zip daf3614e0406f67ae6323c902db3f953a1effb199142362a039e7526dfb9368b diff --git a/lib/fips140/inprocess.txt b/lib/fips140/inprocess.txt index 0ec25f7505c14b..efd3caba85ba62 100644 --- a/lib/fips140/inprocess.txt +++ b/lib/fips140/inprocess.txt @@ -1 +1 @@ -v1.0.0 +v1.0.0-c2097c7c diff --git a/lib/fips140/v1.0.0.zip b/lib/fips140/v1.0.0-c2097c7c.zip similarity index 78% rename from lib/fips140/v1.0.0.zip rename to lib/fips140/v1.0.0-c2097c7c.zip index bd9d3c19d05b90..aabf762d0fecb2 100644 Binary files a/lib/fips140/v1.0.0.zip and b/lib/fips140/v1.0.0-c2097c7c.zip differ diff --git a/lib/fips140/v1.0.0.txt b/lib/fips140/v1.0.0.txt new file mode 100644 index 00000000000000..efd3caba85ba62 --- /dev/null +++ b/lib/fips140/v1.0.0.txt @@ -0,0 +1 @@ +v1.0.0-c2097c7c diff --git a/src/archive/tar/common.go b/src/archive/tar/common.go index 7b3945ff153144..ad31bbb64aaa5c 100644 --- a/src/archive/tar/common.go +++ b/src/archive/tar/common.go @@ -39,6 +39,7 @@ var ( errMissData = errors.New("archive/tar: sparse file references non-existent data") errUnrefData = errors.New("archive/tar: sparse file contains unreferenced data") errWriteHole = errors.New("archive/tar: write non-NUL byte in sparse hole") + errSparseTooLong = errors.New("archive/tar: sparse map too long") ) type headerError []string diff --git a/src/archive/tar/reader.go b/src/archive/tar/reader.go index 8483fb52a28f66..16ac2f5b17c28b 100644 --- a/src/archive/tar/reader.go +++ b/src/archive/tar/reader.go @@ -531,12 +531,17 @@ func readGNUSparseMap1x0(r io.Reader) (sparseDatas, error) { cntNewline int64 buf bytes.Buffer blk block + totalSize int ) // feedTokens copies data in blocks from r into buf until there are // at least cnt newlines in buf. It will not read more blocks than needed. 
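// Illustrative sketch, not part of the patch above: it exercises the
// httpcookiemaxnum behavior described in doc/godebug.md by building a request
// whose Cookie header carries more cookies than the default limit of 3000 and
// counting what net/http returns. Exactly how parsing "fails early" once the
// limit is exceeded (empty result vs. truncated set) is an assumption to
// verify against the release; the knob itself is set via the environment,
// e.g. GODEBUG=httpcookiemaxnum=5000 or =0 for no limit.
package main

import (
	"fmt"
	"net/http"
	"strings"
)

func main() {
	// Build a single Cookie header with 3500 name=value pairs.
	parts := make([]string, 0, 3500)
	for i := range 3500 {
		parts = append(parts, fmt.Sprintf("c%d=v%d", i, i))
	}
	req, err := http.NewRequest("GET", "http://example.com/", nil)
	if err != nil {
		panic(err)
	}
	req.Header.Set("Cookie", strings.Join(parts, "; "))

	// With the default httpcookiemaxnum, this should not report all 3500.
	fmt.Println("cookies parsed:", len(req.Cookies()))
}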
feedTokens := func(n int64) error { for cntNewline < n { + totalSize += len(blk) + if totalSize > maxSpecialFileSize { + return errSparseTooLong + } if _, err := mustReadFull(r, blk[:]); err != nil { return err } @@ -569,8 +574,8 @@ func readGNUSparseMap1x0(r io.Reader) (sparseDatas, error) { } // Parse for all member entries. - // numEntries is trusted after this since a potential attacker must have - // committed resources proportional to what this library used. + // numEntries is trusted after this since feedTokens limits the number of + // tokens based on maxSpecialFileSize. if err := feedTokens(2 * numEntries); err != nil { return nil, err } diff --git a/src/archive/tar/reader_test.go b/src/archive/tar/reader_test.go index 99340a30471914..fca53dae741bd5 100644 --- a/src/archive/tar/reader_test.go +++ b/src/archive/tar/reader_test.go @@ -621,6 +621,11 @@ func TestReader(t *testing.T) { }, Format: FormatPAX, }}, + }, { + // Small compressed file that uncompresses to + // a file with a very large GNU 1.0 sparse map. + file: "testdata/gnu-sparse-many-zeros.tar.bz2", + err: errSparseTooLong, }} for _, v := range vectors { diff --git a/src/archive/tar/testdata/gnu-sparse-many-zeros.tar.bz2 b/src/archive/tar/testdata/gnu-sparse-many-zeros.tar.bz2 new file mode 100644 index 00000000000000..751d7fd4b68be1 Binary files /dev/null and b/src/archive/tar/testdata/gnu-sparse-many-zeros.tar.bz2 differ diff --git a/src/cmd/cgo/internal/testout/out_test.go b/src/cmd/cgo/internal/testout/out_test.go new file mode 100644 index 00000000000000..81dfa365871372 --- /dev/null +++ b/src/cmd/cgo/internal/testout/out_test.go @@ -0,0 +1,144 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package out_test + +import ( + "bufio" + "bytes" + "fmt" + "internal/testenv" + "internal/goarch" + "os" + "path/filepath" + "regexp" + "strconv" + "strings" + "testing" +) + +type methodAlign struct { + Method string + Align int +} + +var wantAligns = map[string]int{ + "ReturnEmpty": 1, + "ReturnOnlyUint8": 1, + "ReturnOnlyUint16": 2, + "ReturnOnlyUint32": 4, + "ReturnOnlyUint64": goarch.PtrSize, + "ReturnOnlyInt": goarch.PtrSize, + "ReturnOnlyPtr": goarch.PtrSize, + "ReturnByteSlice": goarch.PtrSize, + "ReturnString": goarch.PtrSize, + "InputAndReturnUint8": 1, + "MixedTypes": goarch.PtrSize, +} + +// TestAligned tests that the generated _cgo_export.c file has the wanted +// align attributes for struct types used as arguments or results of +// //exported functions. 
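// Illustrative sketch, not part of the test above: it prints the Go-side
// alignments that the wantAligns table encodes, using unsafe.Alignof. The
// pointer-sized entries print 8 on 64-bit hosts and track the platform word
// size elsewhere, which is what goarch.PtrSize captures in the table.
package main

import (
	"fmt"
	"unsafe"
)

func main() {
	fmt.Println("uint8: ", unsafe.Alignof(uint8(0)))    // 1
	fmt.Println("uint16:", unsafe.Alignof(uint16(0)))   // 2
	fmt.Println("uint32:", unsafe.Alignof(uint32(0)))   // 4
	fmt.Println("uint64:", unsafe.Alignof(uint64(0)))   // word-sized on most platforms
	fmt.Println("int:   ", unsafe.Alignof(int(0)))      // word-sized
	fmt.Println("ptr:   ", unsafe.Alignof(new(int)))    // word-sized
	fmt.Println("slice: ", unsafe.Alignof([]byte(nil))) // alignment of the slice header
	fmt.Println("string:", unsafe.Alignof(""))          // alignment of the string header
}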
+func TestAligned(t *testing.T) { + testenv.MustHaveGoRun(t) + testenv.MustHaveCGO(t) + + testdata, err := filepath.Abs("testdata") + if err != nil { + t.Fatal(err) + } + + objDir := t.TempDir() + + cmd := testenv.Command(t, testenv.GoToolPath(t), "tool", "cgo", + "-objdir", objDir, + filepath.Join(testdata, "aligned.go")) + cmd.Stderr = new(bytes.Buffer) + + err = cmd.Run() + if err != nil { + t.Fatalf("%#q: %v\n%s", cmd, err, cmd.Stderr) + } + + haveAligns, err := parseAlign(filepath.Join(objDir, "_cgo_export.c")) + if err != nil { + t.Fatal(err) + } + + // Check that we have all the wanted methods + if len(haveAligns) != len(wantAligns) { + t.Fatalf("have %d methods with aligned, want %d", len(haveAligns), len(wantAligns)) + } + + for i := range haveAligns { + method := haveAligns[i].Method + haveAlign := haveAligns[i].Align + + wantAlign, ok := wantAligns[method] + if !ok { + t.Errorf("method %s: have aligned %d, want missing entry", method, haveAlign) + } else if haveAlign != wantAlign { + t.Errorf("method %s: have aligned %d, want %d", method, haveAlign, wantAlign) + } + } +} + +func parseAlign(filename string) ([]methodAlign, error) { + file, err := os.Open(filename) + if err != nil { + return nil, fmt.Errorf("failed to open file: %w", err) + } + defer file.Close() + + var results []methodAlign + scanner := bufio.NewScanner(file) + + // Regex to match function declarations like "struct MethodName_return MethodName(" + funcRegex := regexp.MustCompile(`^struct\s+(\w+)_return\s+(\w+)\(`) + // Regex to match simple function declarations like "GoSlice MethodName(" + simpleFuncRegex := regexp.MustCompile(`^Go\w+\s+(\w+)\(`) + // Regex to match void-returning exported functions like "void ReturnEmpty(" + voidFuncRegex := regexp.MustCompile(`^void\s+(\w+)\(`) + // Regex to match align attributes like "__attribute__((aligned(8)))" + alignRegex := regexp.MustCompile(`__attribute__\(\(aligned\((\d+)\)\)\)`) + + var currentMethod string + + for scanner.Scan() { + line := strings.TrimSpace(scanner.Text()) + + // Check if this line declares a function with struct return type + if matches := funcRegex.FindStringSubmatch(line); matches != nil { + currentMethod = matches[2] // Extract the method name + } else if matches := simpleFuncRegex.FindStringSubmatch(line); matches != nil { + // Check if this line declares a function with simple return type (like GoSlice) + currentMethod = matches[1] // Extract the method name + } else if matches := voidFuncRegex.FindStringSubmatch(line); matches != nil { + // Check if this line declares a void-returning function + currentMethod = matches[1] // Extract the method name + } + + // Check if this line contains align information + if alignMatches := alignRegex.FindStringSubmatch(line); alignMatches != nil && currentMethod != "" { + alignStr := alignMatches[1] + align, err := strconv.Atoi(alignStr) + if err != nil { + // Skip this entry if we can't parse the align as integer + currentMethod = "" + continue + } + results = append(results, methodAlign{ + Method: currentMethod, + Align: align, + }) + currentMethod = "" // Reset for next method + } + } + + if err := scanner.Err(); err != nil { + return nil, fmt.Errorf("error reading file: %w", err) + } + + return results, nil +} diff --git a/src/cmd/cgo/internal/testout/testdata/aligned.go b/src/cmd/cgo/internal/testout/testdata/aligned.go new file mode 100644 index 00000000000000..cea6f2889a0cad --- /dev/null +++ b/src/cmd/cgo/internal/testout/testdata/aligned.go @@ -0,0 +1,63 @@ +// Copyright 2025 The Go Authors. 
All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +import "C" + +//export ReturnEmpty +func ReturnEmpty() { + return +} + +//export ReturnOnlyUint8 +func ReturnOnlyUint8() (uint8, uint8, uint8) { + return 1, 2, 3 +} + +//export ReturnOnlyUint16 +func ReturnOnlyUint16() (uint16, uint16, uint16) { + return 1, 2, 3 +} + +//export ReturnOnlyUint32 +func ReturnOnlyUint32() (uint32, uint32, uint32) { + return 1, 2, 3 +} + +//export ReturnOnlyUint64 +func ReturnOnlyUint64() (uint64, uint64, uint64) { + return 1, 2, 3 +} + +//export ReturnOnlyInt +func ReturnOnlyInt() (int, int, int) { + return 1, 2, 3 +} + +//export ReturnOnlyPtr +func ReturnOnlyPtr() (*int, *int, *int) { + a, b, c := 1, 2, 3 + return &a, &b, &c +} + +//export ReturnString +func ReturnString() string { + return "hello" +} + +//export ReturnByteSlice +func ReturnByteSlice() []byte { + return []byte{1, 2, 3} +} + +//export InputAndReturnUint8 +func InputAndReturnUint8(a, b, c uint8) (uint8, uint8, uint8) { + return a, b, c +} + +//export MixedTypes +func MixedTypes(a uint8, b uint16, c uint32, d uint64, e int, f *int) (uint8, uint16, uint32, uint64, int, *int) { + return a, b, c, d, e, f +} diff --git a/src/cmd/cgo/out.go b/src/cmd/cgo/out.go index 10870b7c85cebd..046041ab3835ac 100644 --- a/src/cmd/cgo/out.go +++ b/src/cmd/cgo/out.go @@ -949,6 +949,8 @@ func (p *Package) writeExports(fgo2, fm, fgcc, fgcch io.Writer) { fmt.Fprintf(gotype, "struct {\n") off := int64(0) npad := 0 + // the align is at least 1 (for char) + maxAlign := int64(1) argField := func(typ ast.Expr, namePat string, args ...interface{}) { name := fmt.Sprintf(namePat, args...) t := p.cgoType(typ) @@ -963,6 +965,11 @@ func (p *Package) writeExports(fgo2, fm, fgcc, fgcch io.Writer) { noSourceConf.Fprint(gotype, fset, typ) fmt.Fprintf(gotype, "\n") off += t.Size + // keep track of the maximum alignment among all fields + // so that we can align the struct correctly + if t.Align > maxAlign { + maxAlign = t.Align + } } if fn.Recv != nil { argField(fn.Recv.List[0].Type, "recv") @@ -1051,7 +1058,11 @@ func (p *Package) writeExports(fgo2, fm, fgcc, fgcch io.Writer) { // string.h for memset, and is also robust to C++ // types with constructors. Both GCC and LLVM optimize // this into just zeroing _cgo_a. - fmt.Fprintf(fgcc, "\ttypedef %s %v _cgo_argtype;\n", ctype.String(), p.packedAttribute()) + // + // The struct should be aligned to the maximum alignment + // of any of its fields. This to avoid alignment + // issues. + fmt.Fprintf(fgcc, "\ttypedef %s %v __attribute__((aligned(%d))) _cgo_argtype;\n", ctype.String(), p.packedAttribute(), maxAlign) fmt.Fprintf(fgcc, "\tstatic _cgo_argtype _cgo_zero;\n") fmt.Fprintf(fgcc, "\t_cgo_argtype _cgo_a = _cgo_zero;\n") if gccResult != "void" && (len(fntype.Results.List) > 1 || len(fntype.Results.List[0].Names) > 1) { diff --git a/src/cmd/compile/internal/dwarfgen/dwarf.go b/src/cmd/compile/internal/dwarfgen/dwarf.go index 7d75c0c5ce38d7..4ded846ae4e657 100644 --- a/src/cmd/compile/internal/dwarfgen/dwarf.go +++ b/src/cmd/compile/internal/dwarfgen/dwarf.go @@ -128,14 +128,29 @@ func Info(ctxt *obj.Link, fnsym *obj.LSym, infosym *obj.LSym, curfn obj.Func) (s // already referenced by a dwarf var, attach an R_USETYPE relocation to // the function symbol to insure that the type included in DWARF // processing during linking. + // Do the same with R_USEIFACE relocations from the function symbol for the + // same reason. 
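// Illustrative sketch, not part of the patch: the same extraction idea that
// parseAlign uses above, pulling N out of a GCC-style
// __attribute__((aligned(N))) annotation such as the one out.go now emits for
// _cgo_argtype. The sample declaration string here is made up for
// illustration only.
package main

import (
	"fmt"
	"regexp"
)

func main() {
	line := `typedef struct { GoUint8 r0; GoUint16 r1; } __attribute__((__packed__)) __attribute__((aligned(2))) _cgo_argtype;`
	alignRegex := regexp.MustCompile(`__attribute__\(\(aligned\((\d+)\)\)\)`)
	if m := alignRegex.FindStringSubmatch(line); m != nil {
		fmt.Println("declared alignment:", m[1]) // "2"
	}
}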
+ // All these R_USETYPE relocations are only looked at if the function + // survives deadcode elimination in the linker. typesyms := []*obj.LSym{} for t := range fnsym.Func().Autot { typesyms = append(typesyms, t) } + for i := range fnsym.R { + if fnsym.R[i].Type == objabi.R_USEIFACE && !strings.HasPrefix(fnsym.R[i].Sym.Name, "go:itab.") { + // Types referenced through itab will be referenced from somewhere else + typesyms = append(typesyms, fnsym.R[i].Sym) + } + } slices.SortFunc(typesyms, func(a, b *obj.LSym) int { return strings.Compare(a.Name, b.Name) }) + var lastsym *obj.LSym for _, sym := range typesyms { + if sym == lastsym { + continue + } + lastsym = sym infosym.AddRel(ctxt, obj.Reloc{Type: objabi.R_USETYPE, Sym: sym}) } fnsym.Func().Autot = nil diff --git a/src/cmd/compile/internal/noder/reader.go b/src/cmd/compile/internal/noder/reader.go index 38b0bc1d8a4153..7256801965cc44 100644 --- a/src/cmd/compile/internal/noder/reader.go +++ b/src/cmd/compile/internal/noder/reader.go @@ -49,9 +49,6 @@ type pkgReader struct { // but bitwise inverted so we can detect if we're missing the entry // or not. newindex []index - - // indicates whether the data is reading during reshaping. - reshaping bool } func newPkgReader(pr pkgbits.PkgDecoder) *pkgReader { @@ -119,10 +116,6 @@ type reader struct { // find parameters/results. funarghack bool - // reshaping is used during reading exprReshape code, preventing - // the reader from shapifying the re-shaped type. - reshaping bool - // methodSym is the name of method's name, if reading a method. // It's nil if reading a normal function or closure body. methodSym *types.Sym @@ -937,8 +930,19 @@ func shapify(targ *types.Type, basic bool) *types.Type { // types, and discarding struct field names and tags. However, we'll // need to start tracking how type parameters are actually used to // implement some of these optimizations. + pointerShaping := basic && targ.IsPtr() && !targ.Elem().NotInHeap() + // The exception is when the type parameter is a pointer to a type + // which `Type.HasShape()` returns true, but `Type.IsShape()` returns + // false, like `*[]go.shape.T`. This is because the type parameter is + // used to instantiate a generic function inside another generic function. + // In this case, we want to keep the targ as-is, otherwise, we may lose the + // original type after `*[]go.shape.T` is shapified to `*go.shape.uint8`. + // See issue #54535, #71184. + if pointerShaping && !targ.Elem().IsShape() && targ.Elem().HasShape() { + return targ + } under := targ.Underlying() - if basic && targ.IsPtr() && !targ.Elem().NotInHeap() { + if pointerShaping { under = types.NewPtr(types.Types[types.TUINT8]) } @@ -1014,25 +1018,7 @@ func (pr *pkgReader) objDictIdx(sym *types.Sym, idx index, implicits, explicits // arguments. for i, targ := range dict.targs { basic := r.Bool() - isPointerShape := basic && targ.IsPtr() && !targ.Elem().NotInHeap() - // We should not do shapify during the reshaping process, see #71184. - // However, this only matters for shapify a pointer type, which will - // lose the original underlying type. - // - // Example with a pointer type: - // - // - First, shapifying *[]T -> *uint8 - // - During the reshaping process, *uint8 is shapified to *go.shape.uint8 - // - This ends up with a different type with the original *[]T - // - // For a non-pointer type: - // - // - int -> go.shape.int - // - go.shape.int -> go.shape.int - // - // We always end up with the identical type. 
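// Illustrative sketch, not compiler code: the sort-then-skip pattern the
// dwarfgen change above uses. Once the referenced symbols are sorted,
// duplicates sit next to each other, so a single "last seen" comparison drops
// them before a relocation is emitted for each distinct symbol.
package main

import (
	"fmt"
	"slices"
	"strings"
)

func main() {
	syms := []string{"type:B", "type:A", "type:B", "type:C", "type:A"}
	slices.SortFunc(syms, strings.Compare)
	last := ""
	for _, s := range syms {
		if s == last {
			continue // already emitted a relocation for this symbol
		}
		last = s
		fmt.Println("R_USETYPE", s)
	}
}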
- canShapify := !pr.reshaping || !isPointerShape - if dict.shaped && canShapify { + if dict.shaped { dict.targs[i] = shapify(targ, basic) } } @@ -2470,10 +2456,7 @@ func (r *reader) expr() (res ir.Node) { case exprReshape: typ := r.typ() - old := r.reshaping - r.reshaping = true x := r.expr() - r.reshaping = old if types.IdenticalStrict(x.Type(), typ) { return x @@ -2596,10 +2579,7 @@ func (r *reader) funcInst(pos src.XPos) (wrapperFn, baseFn, dictPtr ir.Node) { info := r.dict.subdicts[idx] explicits := r.p.typListIdx(info.explicits, r.dict) - old := r.p.reshaping - r.p.reshaping = r.reshaping baseFn = r.p.objIdx(info.idx, implicits, explicits, true).(*ir.Name) - r.p.reshaping = old // TODO(mdempsky): Is there a more robust way to get the // dictionary pointer type here? diff --git a/src/cmd/compile/internal/ssa/rewrite.go b/src/cmd/compile/internal/ssa/rewrite.go index eb2c3b31b8c998..4834f833c2f553 100644 --- a/src/cmd/compile/internal/ssa/rewrite.go +++ b/src/cmd/compile/internal/ssa/rewrite.go @@ -2555,7 +2555,7 @@ func rewriteStructStore(v *Value) *Value { // isDirectType reports whether v represents a type // (a *runtime._type) whose value is stored directly in an -// interface (i.e., is pointer or pointer-like). +// interface (i.e., is pointer or pointer-like) and is comparable. func isDirectType(v *Value) bool { return isDirectType1(v) } @@ -2571,7 +2571,8 @@ func isDirectType1(v *Value) bool { return false } if ti, ok := (*lsym.Extra).(*obj.TypeInfo); ok { - return types.IsDirectIface(ti.Type.(*types.Type)) + t := ti.Type.(*types.Type) + return types.IsDirectIface(t) && types.IsComparable(t) } } return false @@ -2588,7 +2589,7 @@ func isDirectType2(v *Value) bool { // isDirectIface reports whether v represents an itab // (a *runtime._itab) for a type whose value is stored directly -// in an interface (i.e., is pointer or pointer-like). +// in an interface (i.e., is pointer or pointer-like) and is comparable. func isDirectIface(v *Value) bool { return isDirectIface1(v, 9) } @@ -2607,7 +2608,8 @@ func isDirectIface1(v *Value, depth int) bool { return false } if ii, ok := (*lsym.Extra).(*obj.ItabInfo); ok { - return types.IsDirectIface(ii.Type.(*types.Type)) + t := ii.Type.(*types.Type) + return types.IsDirectIface(t) && types.IsComparable(t) } case OpConstNil: // We can treat this as direct, because if the itab is diff --git a/src/cmd/compile/internal/ssa/tighten.go b/src/cmd/compile/internal/ssa/tighten.go index eb5007b26e29e4..0a4b56d5781a24 100644 --- a/src/cmd/compile/internal/ssa/tighten.go +++ b/src/cmd/compile/internal/ssa/tighten.go @@ -124,18 +124,21 @@ func tighten(f *Func) { // If the target location is inside a loop, // move the target location up to just before the loop head. - for _, b := range f.Blocks { - origloop := loops.b2l[b.ID] - for _, v := range b.Values { - t := target[v.ID] - if t == nil { - continue - } - targetloop := loops.b2l[t.ID] - for targetloop != nil && (origloop == nil || targetloop.depth > origloop.depth) { - t = idom[targetloop.header.ID] - target[v.ID] = t - targetloop = loops.b2l[t.ID] + if !loops.hasIrreducible { + // Loop info might not be correct for irreducible loops. See issue 75569. 
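// A user-level illustration, not compiler code, of why isDirectType and
// isDirectIface above now also check comparability: func values are
// pointer-shaped and stored directly in an interface, yet comparing two
// interfaces whose dynamic type is a func must panic at run time, so such a
// comparison cannot be treated as a plain pointer compare. (The connection to
// the SSA rewrite is my reading of the change, hedged accordingly.)
package main

import "fmt"

func main() {
	defer func() {
		fmt.Println("recovered:", recover()) // e.g. "comparing uncomparable type func()"
	}()
	var a any = func() {}
	var b any = func() {}
	fmt.Println(a == b) // panics: func values are not comparable
}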
+ for _, b := range f.Blocks { + origloop := loops.b2l[b.ID] + for _, v := range b.Values { + t := target[v.ID] + if t == nil { + continue + } + targetloop := loops.b2l[t.ID] + for targetloop != nil && (origloop == nil || targetloop.depth > origloop.depth) { + t = idom[targetloop.header.ID] + target[v.ID] = t + targetloop = loops.b2l[t.ID] + } } } } diff --git a/src/cmd/compile/testdata/script/issue75461.txt b/src/cmd/compile/testdata/script/issue75461.txt new file mode 100644 index 00000000000000..05f0fd4cfae304 --- /dev/null +++ b/src/cmd/compile/testdata/script/issue75461.txt @@ -0,0 +1,78 @@ +go build main.go +! stdout . +! stderr . + +-- main.go -- +package main + +import ( + "demo/registry" +) + +func main() { + _ = registry.NewUserRegistry() +} + +-- go.mod -- +module demo + +go 1.24 + +-- model/user.go -- +package model + +type User struct { + ID int +} + +func (c *User) String() string { + return "" +} + +-- ordered/map.go -- +package ordered + +type OrderedMap[K comparable, V any] struct { + m map[K]V +} + +func New[K comparable, V any](options ...any) *OrderedMap[K, V] { + orderedMap := &OrderedMap[K, V]{} + return orderedMap +} + +-- registry/user.go -- +package registry + +import ( + "demo/model" + "demo/ordered" +) + +type baseRegistry = Registry[model.User, *model.User] + +type UserRegistry struct { + *baseRegistry +} + +type Registry[T any, P PStringer[T]] struct { + m *ordered.OrderedMap[string, P] +} + +type PStringer[T any] interface { + *T + String() string +} + +func NewRegistry[T any, P PStringer[T]]() *Registry[T, P] { + r := &Registry[T, P]{ + m: ordered.New[string, P](), + } + return r +} + +func NewUserRegistry() *UserRegistry { + return &UserRegistry{ + baseRegistry: NewRegistry[model.User](), + } +} diff --git a/src/cmd/go/internal/fips140/mkzip.go b/src/cmd/go/internal/fips140/mkzip.go index 7a6ba803248e37..a139a0f2e256f7 100644 --- a/src/cmd/go/internal/fips140/mkzip.go +++ b/src/cmd/go/internal/fips140/mkzip.go @@ -27,10 +27,10 @@ import ( "log" "os" "path/filepath" - "regexp" "strings" "golang.org/x/mod/module" + "golang.org/x/mod/semver" modzip "golang.org/x/mod/zip" ) @@ -61,7 +61,7 @@ func main() { // Must have valid version, and must not overwrite existing file. version := flag.Arg(0) - if !regexp.MustCompile(`^v\d+\.\d+\.\d+$`).MatchString(version) { + if semver.Canonical(version) != version { log.Fatalf("invalid version %q; must be vX.Y.Z", version) } if _, err := os.Stat(version + ".zip"); err == nil { @@ -117,7 +117,9 @@ func main() { if !bytes.Contains(contents, []byte(returnLine)) { log.Fatalf("did not find %q in fips140.go", returnLine) } - newLine := `return "` + version + `"` + // Use only the vX.Y.Z part of a possible vX.Y.Z-hash version. + v, _, _ := strings.Cut(version, "-") + newLine := `return "` + v + `"` contents = bytes.ReplaceAll(contents, []byte(returnLine), []byte(newLine)) wf, err := zw.Create(f.Name) if err != nil { diff --git a/src/cmd/go/internal/gover/mod.go b/src/cmd/go/internal/gover/mod.go index d3cc17068def6d..3ac5ae8824b233 100644 --- a/src/cmd/go/internal/gover/mod.go +++ b/src/cmd/go/internal/gover/mod.go @@ -109,6 +109,9 @@ func ModIsPrefix(path, vers string) bool { // The caller is assumed to have checked that ModIsValid(path, vers) is true. 
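// A compact sketch of the pattern exercised by the issue75461 test script
// above: a generic container instantiated inside another generic function
// whose type parameter is a pointer type constrained by an interface
// (P = *T with a String method). All names here are made up for illustration.
package main

import "fmt"

type Box[K comparable, V any] struct{ m map[K]V }

func NewBox[K comparable, V any]() *Box[K, V] { return &Box[K, V]{m: map[K]V{}} }

type PStringer[T any] interface {
	*T
	String() string
}

type User struct{ ID int }

func (u *User) String() string { return fmt.Sprintf("user-%d", u.ID) }

type Registry[T any, P PStringer[T]] struct{ box *Box[string, P] }

func NewRegistry[T any, P PStringer[T]]() *Registry[T, P] {
	return &Registry[T, P]{box: NewBox[string, P]()}
}

func main() {
	r := NewRegistry[User]() // P is inferred as *User from the constraint
	u := &User{ID: 7}
	r.box.m[u.String()] = u
	fmt.Println("entries:", len(r.box.m))
}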
func ModIsPrerelease(path, vers string) bool { if IsToolchain(path) { + if path == "toolchain" { + return IsPrerelease(FromToolchain(vers)) + } return IsPrerelease(vers) } return semver.Prerelease(vers) != "" diff --git a/src/cmd/go/testdata/script/fipssnap.txt b/src/cmd/go/testdata/script/fipssnap.txt index 9888bc82f11d81..4d96aedf2a472d 100644 --- a/src/cmd/go/testdata/script/fipssnap.txt +++ b/src/cmd/go/testdata/script/fipssnap.txt @@ -1,4 +1,4 @@ -env snap=v1.0.0 +env snap=v1.0.0-c2097c7c env alias=inprocess env GOFIPS140=$snap @@ -23,8 +23,7 @@ stdout crypto/internal/fips140/$snap/sha256 ! stdout crypto/internal/fips140/check # again with GOFIPS140=$alias -# TODO: enable when we add inprocess.txt -# env GOFIPS140=$alias +env GOFIPS140=$alias # default GODEBUG includes fips140=on go list -f '{{.DefaultGODEBUG}}' diff --git a/src/cmd/go/testdata/script/mod_get_toolchain.txt b/src/cmd/go/testdata/script/mod_get_toolchain.txt index 87e84ae15ef987..83cef4a0fd0c5e 100644 --- a/src/cmd/go/testdata/script/mod_get_toolchain.txt +++ b/src/cmd/go/testdata/script/mod_get_toolchain.txt @@ -94,12 +94,14 @@ stderr '^go: added toolchain go1.24rc1$' grep 'go 1.22.9' go.mod # no longer implied grep 'toolchain go1.24rc1' go.mod -# go get toolchain@latest finds go1.999testmod. +# go get toolchain@latest finds go1.23.9. cp go.mod.orig go.mod go get toolchain@latest -stderr '^go: added toolchain go1.999testmod$' +stderr '^go: added toolchain go1.23.9$' grep 'go 1.21' go.mod -grep 'toolchain go1.999testmod' go.mod +grep 'toolchain go1.23.9' go.mod + + # Bug fixes. @@ -115,7 +117,7 @@ stderr '^go: upgraded go 1.19 => 1.21.0' # go get toolchain@1.24rc1 is OK too. go get toolchain@1.24rc1 -stderr '^go: downgraded toolchain go1.999testmod => go1.24rc1$' +stderr '^go: upgraded toolchain go1.23.9 => go1.24rc1$' # go get go@1.21 should work if we are the Go 1.21 language version, # even though there's no toolchain for it. diff --git a/src/context/context.go b/src/context/context.go index 4f150f6a1d6c7e..24bb18abd3dc27 100644 --- a/src/context/context.go +++ b/src/context/context.go @@ -463,6 +463,8 @@ func (c *cancelCtx) Done() <-chan struct{} { func (c *cancelCtx) Err() error { // An atomic load is ~5x faster than a mutex, which can matter in tight loops. if err := c.err.Load(); err != nil { + // Ensure the done channel has been closed before returning a non-nil error. + <-c.Done() return err.(error) } return nil diff --git a/src/context/x_test.go b/src/context/x_test.go index 937cab1445e7e8..0cf19688c3f5ee 100644 --- a/src/context/x_test.go +++ b/src/context/x_test.go @@ -1177,3 +1177,23 @@ func (c *customContext) Err() error { func (c *customContext) Value(key any) any { return c.parent.Value(key) } + +// Issue #75533. +func TestContextErrDoneRace(t *testing.T) { + // 4 iterations reliably reproduced #75533. 
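// Illustrative program, not the runtime test below, for the guarantee the
// context change above restores: once ctx.Err() reports non-nil, ctx.Done()
// must already be closed, so the receive here can never block.
package main

import (
	"context"
	"fmt"
)

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	go cancel()
	for ctx.Err() == nil {
		// spin until cancellation is observed via Err
	}
	<-ctx.Done() // guaranteed not to block once Err() is non-nil
	fmt.Println("done closed, err:", ctx.Err())
}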
+ for range 10 { + ctx, cancel := WithCancel(Background()) + donec := ctx.Done() + go cancel() + for ctx.Err() == nil { + if runtime.GOARCH == "wasm" { + runtime.Gosched() // need to explicitly yield + } + } + select { + case <-donec: + default: + t.Fatalf("ctx.Err is non-nil, but ctx.Done is not closed") + } + } +} diff --git a/src/crypto/internal/cryptotest/hash.go b/src/crypto/internal/cryptotest/hash.go index f00e9c80d3c86a..37fd96a2d9d0b9 100644 --- a/src/crypto/internal/cryptotest/hash.go +++ b/src/crypto/internal/cryptotest/hash.go @@ -20,7 +20,7 @@ type MakeHash func() hash.Hash // TestHash performs a set of tests on hash.Hash implementations, checking the // documented requirements of Write, Sum, Reset, Size, and BlockSize. func TestHash(t *testing.T, mh MakeHash) { - if boring.Enabled || fips140.Version() == "v1.0" { + if boring.Enabled || fips140.Version() == "v1.0.0" { testhash.TestHashWithoutClone(t, testhash.MakeHash(mh)) return } diff --git a/src/crypto/internal/fips140/cast.go b/src/crypto/internal/fips140/cast.go index 66e21d8a90dbc9..3968dcadd4da8f 100644 --- a/src/crypto/internal/fips140/cast.go +++ b/src/crypto/internal/fips140/cast.go @@ -56,9 +56,10 @@ func CAST(name string, f func() error) { } // PCT runs the named Pairwise Consistency Test (if operated in FIPS mode) and -// returns any errors. If an error is returned, the key must not be used. +// aborts the program (stopping the module input/output and entering the "error +// state") if the test fails. // -// PCTs are mandatory for every key pair that is generated/imported, including +// PCTs are mandatory for every generated (but not imported) key pair, including // ephemeral keys (which effectively doubles the cost of key establishment). See // Implementation Guidance 10.3.A Additional Comment 1. // @@ -66,17 +67,23 @@ func CAST(name string, f func() error) { // // If a package p calls PCT during key generation, an invocation of that // function should be added to fipstest.TestConditionals. -func PCT(name string, f func() error) error { +func PCT(name string, f func() error) { if strings.ContainsAny(name, ",#=:") { panic("fips: invalid self-test name: " + name) } if !Enabled { - return nil + return } err := f() if name == failfipscast { err = errors.New("simulated PCT failure") } - return err + if err != nil { + fatal("FIPS 140-3 self-test failed: " + name + ": " + err.Error()) + panic("unreachable") + } + if debug { + println("FIPS 140-3 PCT passed:", name) + } } diff --git a/src/crypto/internal/fips140/ecdh/ecdh.go b/src/crypto/internal/fips140/ecdh/ecdh.go index bf71c75a92c6eb..967032aab28fc0 100644 --- a/src/crypto/internal/fips140/ecdh/ecdh.go +++ b/src/crypto/internal/fips140/ecdh/ecdh.go @@ -161,6 +161,27 @@ func GenerateKey[P Point[P]](c *Curve[P], rand io.Reader) (*PrivateKey, error) { if err != nil { continue } + + // A "Pairwise Consistency Test" makes no sense if we just generated the + // public key from an ephemeral private key. Moreover, there is no way to + // check it aside from redoing the exact same computation again. SP 800-56A + // Rev. 3, Section 5.6.2.1.4 acknowledges that, and doesn't require it. + // However, ISO 19790:2012, Section 7.10.3.3 has a blanket requirement for a + // PCT for all generated keys (AS10.35) and FIPS 140-3 IG 10.3.A, Additional + // Comment 1 goes out of its way to say that "the PCT shall be performed + // consistent [...], even if the underlying standard does not require a + // PCT". So we do it. And make ECDH nearly 50% slower (only) in FIPS mode. 
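// A stand-alone sketch with assumed names, not the fips140 package itself, of
// the control flow the PCT change above adopts: a failing pairwise
// consistency test no longer returns an error for the caller to ignore, it
// stops the program, while a debug mode logs successes.
package main

import (
	"errors"
	"log"
)

var debug = true // stand-in for the GODEBUG fips140=debug mode

func pct(name string, f func() error) {
	if err := f(); err != nil {
		log.Fatalf("self-test failed: %s: %v", name, err) // enter the "error state"
	}
	if debug {
		log.Printf("PCT passed: %s", name)
	}
}

func main() {
	pct("example PCT", func() error { return nil })
	pct("failing PCT", func() error { return errors.New("simulated failure") })
}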
+ fips140.PCT("ECDH PCT", func() error { + p1, err := c.newPoint().ScalarBaseMult(privateKey.d) + if err != nil { + return err + } + if !bytes.Equal(p1.Bytes(), privateKey.pub.q) { + return errors.New("crypto/ecdh: public key does not match private key") + } + return nil + }) + return privateKey, nil } } @@ -188,28 +209,6 @@ func NewPrivateKey[P Point[P]](c *Curve[P], key []byte) (*PrivateKey, error) { panic("crypto/ecdh: internal error: public key is the identity element") } - // A "Pairwise Consistency Test" makes no sense if we just generated the - // public key from an ephemeral private key. Moreover, there is no way to - // check it aside from redoing the exact same computation again. SP 800-56A - // Rev. 3, Section 5.6.2.1.4 acknowledges that, and doesn't require it. - // However, ISO 19790:2012, Section 7.10.3.3 has a blanket requirement for a - // PCT for all generated keys (AS10.35) and FIPS 140-3 IG 10.3.A, Additional - // Comment 1 goes out of its way to say that "the PCT shall be performed - // consistent [...], even if the underlying standard does not require a - // PCT". So we do it. And make ECDH nearly 50% slower (only) in FIPS mode. - if err := fips140.PCT("ECDH PCT", func() error { - p1, err := c.newPoint().ScalarBaseMult(key) - if err != nil { - return err - } - if !bytes.Equal(p1.Bytes(), publicKey) { - return errors.New("crypto/ecdh: public key does not match private key") - } - return nil - }); err != nil { - panic(err) - } - k := &PrivateKey{d: bytes.Clone(key), pub: PublicKey{curve: c.curve, q: publicKey}} return k, nil } diff --git a/src/crypto/internal/fips140/ecdsa/cast.go b/src/crypto/internal/fips140/ecdsa/cast.go index 219b7211e74e92..6bc9fd1f46d2d0 100644 --- a/src/crypto/internal/fips140/ecdsa/cast.go +++ b/src/crypto/internal/fips140/ecdsa/cast.go @@ -51,8 +51,8 @@ func testHash() []byte { } } -func fipsPCT[P Point[P]](c *Curve[P], k *PrivateKey) error { - return fips140.PCT("ECDSA PCT", func() error { +func fipsPCT[P Point[P]](c *Curve[P], k *PrivateKey) { + fips140.PCT("ECDSA PCT", func() error { hash := testHash() drbg := newDRBG(sha512.New, k.d, bits2octets(P256(), hash), nil) sig, err := sign(c, k, drbg, hash) diff --git a/src/crypto/internal/fips140/ecdsa/ecdsa.go b/src/crypto/internal/fips140/ecdsa/ecdsa.go index 47c1b2442144e4..81179de4f4e001 100644 --- a/src/crypto/internal/fips140/ecdsa/ecdsa.go +++ b/src/crypto/internal/fips140/ecdsa/ecdsa.go @@ -167,11 +167,6 @@ func NewPrivateKey[P Point[P]](c *Curve[P], D, Q []byte) (*PrivateKey, error) { return nil, err } priv := &PrivateKey{pub: *pub, d: d.Bytes(c.N)} - if err := fipsPCT(c, priv); err != nil { - // This can happen if the application went out of its way to make an - // ecdsa.PrivateKey with a mismatching PublicKey. - return nil, err - } return priv, nil } @@ -204,10 +199,7 @@ func GenerateKey[P Point[P]](c *Curve[P], rand io.Reader) (*PrivateKey, error) { }, d: k.Bytes(c.N), } - if err := fipsPCT(c, priv); err != nil { - // This clearly can't happen, but FIPS 140-3 mandates that we check it. - panic(err) - } + fipsPCT(c, priv) return priv, nil } diff --git a/src/crypto/internal/fips140/ecdsa/hmacdrbg.go b/src/crypto/internal/fips140/ecdsa/hmacdrbg.go index fa82ce39f90c81..698c23bcda5161 100644 --- a/src/crypto/internal/fips140/ecdsa/hmacdrbg.go +++ b/src/crypto/internal/fips140/ecdsa/hmacdrbg.go @@ -122,7 +122,7 @@ func newDRBG[H hash.Hash](hash func() H, entropy, nonce []byte, s personalizatio // // This should only be used for ACVP testing. hmacDRBG is not intended to be // used directly. 
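// Illustrative sketch, not the DRBG itself: the shape of the generic
// signature the hmacdrbg change below switches to. With func() H and H
// constrained to hash.Hash, callers can pass constructors that return
// concrete or narrower interface types directly, without a wrapper closure
// like func() hash.Hash { return h() }. The helper name "sum" is made up.
package main

import (
	"crypto/sha256"
	"fmt"
	"hash"
	"hash/crc32"
)

func sum[H hash.Hash](newHash func() H, data []byte) []byte {
	h := newHash()
	h.Write(data)
	return h.Sum(nil)
}

func main() {
	fmt.Printf("%x\n", sum(sha256.New, []byte("abc")))   // H = hash.Hash
	fmt.Printf("%x\n", sum(crc32.NewIEEE, []byte("abc"))) // H = hash.Hash32
}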
-func TestingOnlyNewDRBG(hash func() hash.Hash, entropy, nonce []byte, s []byte) *hmacDRBG { +func TestingOnlyNewDRBG[H hash.Hash](hash func() H, entropy, nonce []byte, s []byte) *hmacDRBG { return newDRBG(hash, entropy, nonce, plainPersonalizationString(s)) } diff --git a/src/crypto/internal/fips140/ed25519/cast.go b/src/crypto/internal/fips140/ed25519/cast.go index a680c2514b816e..2a3426bd42f50e 100644 --- a/src/crypto/internal/fips140/ed25519/cast.go +++ b/src/crypto/internal/fips140/ed25519/cast.go @@ -12,8 +12,8 @@ import ( "sync" ) -func fipsPCT(k *PrivateKey) error { - return fips140.PCT("Ed25519 sign and verify PCT", func() error { +func fipsPCT(k *PrivateKey) { + fips140.PCT("Ed25519 sign and verify PCT", func() error { return pairwiseTest(k) }) } diff --git a/src/crypto/internal/fips140/ed25519/ed25519.go b/src/crypto/internal/fips140/ed25519/ed25519.go index bbdc5b4a8ba2f9..8beda341d941ad 100644 --- a/src/crypto/internal/fips140/ed25519/ed25519.go +++ b/src/crypto/internal/fips140/ed25519/ed25519.go @@ -69,10 +69,7 @@ func generateKey(priv *PrivateKey) (*PrivateKey, error) { fips140.RecordApproved() drbg.Read(priv.seed[:]) precomputePrivateKey(priv) - if err := fipsPCT(priv); err != nil { - // This clearly can't happen, but FIPS 140-3 requires that we check. - panic(err) - } + fipsPCT(priv) return priv, nil } @@ -88,10 +85,6 @@ func newPrivateKeyFromSeed(priv *PrivateKey, seed []byte) (*PrivateKey, error) { } copy(priv.seed[:], seed) precomputePrivateKey(priv) - if err := fipsPCT(priv); err != nil { - // This clearly can't happen, but FIPS 140-3 requires that we check. - panic(err) - } return priv, nil } @@ -137,12 +130,6 @@ func newPrivateKey(priv *PrivateKey, privBytes []byte) (*PrivateKey, error) { copy(priv.prefix[:], h[32:]) - if err := fipsPCT(priv); err != nil { - // This can happen if the application messed with the private key - // encoding, and the public key doesn't match the seed anymore. - return nil, err - } - return priv, nil } diff --git a/src/crypto/internal/fips140/fips140.go b/src/crypto/internal/fips140/fips140.go index 050967f4808ad6..e48706fbd50bb9 100644 --- a/src/crypto/internal/fips140/fips140.go +++ b/src/crypto/internal/fips140/fips140.go @@ -7,7 +7,6 @@ package fips140 import ( "crypto/internal/fips140deps/godebug" "errors" - "hash" "runtime" ) @@ -63,16 +62,10 @@ func Name() string { return "Go Cryptographic Module" } -// Version returns the formal version (such as "v1.0") if building against a +// Version returns the formal version (such as "v1.0.0") if building against a // frozen module with GOFIPS140. Otherwise, it returns "latest". func Version() string { // This return value is replaced by mkzip.go, it must not be changed or // moved to a different file. return "latest" //mkzip:version } - -// Hash is a legacy compatibility alias for hash.Hash. -// -// It's only here because [crypto/internal/fips140/ecdsa.TestingOnlyNewDRBG] -// takes a "func() fips140.Hash" in v1.0.0, instead of being generic. 
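// A user-level analogue using crypto/ed25519 from the standard library, not
// the internal fips140 package patched below, of what the "Ed25519 sign and
// verify PCT" checks for a freshly generated key: a signature made with the
// private key must verify under the matching public key.
package main

import (
	"crypto/ed25519"
	"crypto/rand"
	"fmt"
)

func main() {
	pub, priv, err := ed25519.GenerateKey(rand.Reader)
	if err != nil {
		panic(err)
	}
	msg := make([]byte, 32)
	sig := ed25519.Sign(priv, msg)
	fmt.Println("pairwise consistency:", ed25519.Verify(pub, msg, sig))
}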
-type Hash = hash.Hash diff --git a/src/crypto/internal/fips140/mlkem/mlkem1024.go b/src/crypto/internal/fips140/mlkem/mlkem1024.go index 034bf3b5d6682d..1419cf20fa9c67 100644 --- a/src/crypto/internal/fips140/mlkem/mlkem1024.go +++ b/src/crypto/internal/fips140/mlkem/mlkem1024.go @@ -118,10 +118,7 @@ func generateKey1024(dk *DecapsulationKey1024) (*DecapsulationKey1024, error) { var z [32]byte drbg.Read(z[:]) kemKeyGen1024(dk, &d, &z) - if err := fips140.PCT("ML-KEM PCT", func() error { return kemPCT1024(dk) }); err != nil { - // This clearly can't happen, but FIPS 140-3 requires us to check. - panic(err) - } + fips140.PCT("ML-KEM PCT", func() error { return kemPCT1024(dk) }) fips140.RecordApproved() return dk, nil } @@ -149,10 +146,6 @@ func newKeyFromSeed1024(dk *DecapsulationKey1024, seed []byte) (*DecapsulationKe d := (*[32]byte)(seed[:32]) z := (*[32]byte)(seed[32:]) kemKeyGen1024(dk, d, z) - if err := fips140.PCT("ML-KEM PCT", func() error { return kemPCT1024(dk) }); err != nil { - // This clearly can't happen, but FIPS 140-3 requires us to check. - panic(err) - } fips140.RecordApproved() return dk, nil } diff --git a/src/crypto/internal/fips140/mlkem/mlkem768.go b/src/crypto/internal/fips140/mlkem/mlkem768.go index 77043830d4d962..298660e4e977dd 100644 --- a/src/crypto/internal/fips140/mlkem/mlkem768.go +++ b/src/crypto/internal/fips140/mlkem/mlkem768.go @@ -177,10 +177,7 @@ func generateKey(dk *DecapsulationKey768) (*DecapsulationKey768, error) { var z [32]byte drbg.Read(z[:]) kemKeyGen(dk, &d, &z) - if err := fips140.PCT("ML-KEM PCT", func() error { return kemPCT(dk) }); err != nil { - // This clearly can't happen, but FIPS 140-3 requires us to check. - panic(err) - } + fips140.PCT("ML-KEM PCT", func() error { return kemPCT(dk) }) fips140.RecordApproved() return dk, nil } @@ -208,10 +205,6 @@ func newKeyFromSeed(dk *DecapsulationKey768, seed []byte) (*DecapsulationKey768, d := (*[32]byte)(seed[:32]) z := (*[32]byte)(seed[32:]) kemKeyGen(dk, d, z) - if err := fips140.PCT("ML-KEM PCT", func() error { return kemPCT(dk) }); err != nil { - // This clearly can't happen, but FIPS 140-3 requires us to check. - panic(err) - } fips140.RecordApproved() return dk, nil } diff --git a/src/crypto/internal/fips140/rsa/keygen.go b/src/crypto/internal/fips140/rsa/keygen.go index 7c0272239ff3cf..00b325d24b2211 100644 --- a/src/crypto/internal/fips140/rsa/keygen.go +++ b/src/crypto/internal/fips140/rsa/keygen.go @@ -105,7 +105,28 @@ func GenerateKey(rand io.Reader, bits int) (*PrivateKey, error) { // negligible chance of failure we can defer the check to the end of key // generation and return an error if it fails. See [checkPrivateKey]. 
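// A user-level analogue using crypto/mlkem from the standard library, not the
// internal package patched below, of what the "ML-KEM PCT" checks after key
// generation: encapsulating to the public key and decapsulating the resulting
// ciphertext must yield the same shared secret. The return order of
// Encapsulate shown here is an assumption to confirm against the crypto/mlkem
// documentation.
package main

import (
	"bytes"
	"crypto/mlkem"
	"fmt"
)

func main() {
	dk, err := mlkem.GenerateKey768()
	if err != nil {
		panic(err)
	}
	shared, ciphertext := dk.EncapsulationKey().Encapsulate()
	recovered, err := dk.Decapsulate(ciphertext)
	if err != nil {
		panic(err)
	}
	fmt.Println("pairwise consistency:", bytes.Equal(shared, recovered))
}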
- return newPrivateKey(N, 65537, d, P, Q) + k, err := newPrivateKey(N, 65537, d, P, Q) + if err != nil { + return nil, err + } + + if k.fipsApproved { + fips140.PCT("RSA sign and verify PCT", func() error { + hash := []byte{ + 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, + 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10, + 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, + 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, 0x20, + } + sig, err := signPKCS1v15(k, "SHA-256", hash) + if err != nil { + return err + } + return verifyPKCS1v15(k.PublicKey(), "SHA-256", hash, sig) + }) + } + + return k, nil } } diff --git a/src/crypto/internal/fips140/rsa/rsa.go b/src/crypto/internal/fips140/rsa/rsa.go index 0bbf7010506107..764338940a3282 100644 --- a/src/crypto/internal/fips140/rsa/rsa.go +++ b/src/crypto/internal/fips140/rsa/rsa.go @@ -310,26 +310,6 @@ func checkPrivateKey(priv *PrivateKey) error { return errors.New("crypto/rsa: d too small") } - // If the key is still in scope for FIPS mode, perform a Pairwise - // Consistency Test. - if priv.fipsApproved { - if err := fips140.PCT("RSA sign and verify PCT", func() error { - hash := []byte{ - 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, - 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10, - 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, - 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, 0x20, - } - sig, err := signPKCS1v15(priv, "SHA-256", hash) - if err != nil { - return err - } - return verifyPKCS1v15(priv.PublicKey(), "SHA-256", hash, sig) - }); err != nil { - return err - } - } - return nil } diff --git a/src/crypto/internal/fips140/subtle/xor_asm.go b/src/crypto/internal/fips140/subtle/xor_asm.go index b07239da3e31c1..bb85aefef4013e 100644 --- a/src/crypto/internal/fips140/subtle/xor_asm.go +++ b/src/crypto/internal/fips140/subtle/xor_asm.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build (amd64 || arm64 || mips || mipsle || mips64 || mips64le || ppc64 || ppc64le || riscv64) && !purego +//go:build (amd64 || arm64 || ppc64 || ppc64le || riscv64) && !purego package subtle diff --git a/src/crypto/internal/fips140/subtle/xor_generic.go b/src/crypto/internal/fips140/subtle/xor_generic.go index ed484bc630e98d..0b31eec60197d3 100644 --- a/src/crypto/internal/fips140/subtle/xor_generic.go +++ b/src/crypto/internal/fips140/subtle/xor_generic.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build (!amd64 && !arm64 && !loong64 && !mips && !mipsle && !mips64 && !mips64le && !ppc64 && !ppc64le && !riscv64) || purego +//go:build (!amd64 && !arm64 && !loong64 && !ppc64 && !ppc64le && !riscv64) || purego package subtle diff --git a/src/crypto/internal/fips140/subtle/xor_mips64x.s b/src/crypto/internal/fips140/subtle/xor_mips64x.s deleted file mode 100644 index e580235914aeaf..00000000000000 --- a/src/crypto/internal/fips140/subtle/xor_mips64x.s +++ /dev/null @@ -1,153 +0,0 @@ -// Copyright 2025 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
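// A user-level analogue using crypto/rsa and crypto/sha256, not the internal
// fips140/rsa package, of the "RSA sign and verify PCT" that GenerateKey now
// runs above: sign a fixed digest with the fresh key and verify it with the
// corresponding public key.
package main

import (
	"crypto"
	"crypto/rand"
	"crypto/rsa"
	"crypto/sha256"
	"fmt"
)

func main() {
	key, err := rsa.GenerateKey(rand.Reader, 2048)
	if err != nil {
		panic(err)
	}
	digest := sha256.Sum256([]byte("pairwise consistency test"))
	sig, err := rsa.SignPKCS1v15(rand.Reader, key, crypto.SHA256, digest[:])
	if err != nil {
		panic(err)
	}
	err = rsa.VerifyPKCS1v15(&key.PublicKey, crypto.SHA256, digest[:], sig)
	fmt.Println("pairwise consistency:", err == nil)
}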
- -//go:build (mips64 || mips64le) && !purego - -#include "textflag.h" - -// func xorBytes(dst, a, b *byte, n int) -TEXT ·xorBytes(SB), NOSPLIT|NOFRAME, $0 - MOVV dst+0(FP), R1 - MOVV a+8(FP), R2 - MOVV b+16(FP), R3 - MOVV n+24(FP), R4 - -xor_64_check: - SGTU $64, R4, R5 // R5 = 1 if (64 > R4) - BNE R5, xor_32_check -xor_64: - MOVV (R2), R6 - MOVV 8(R2), R7 - MOVV 16(R2), R8 - MOVV 24(R2), R9 - MOVV (R3), R10 - MOVV 8(R3), R11 - MOVV 16(R3), R12 - MOVV 24(R3), R13 - XOR R6, R10 - XOR R7, R11 - XOR R8, R12 - XOR R9, R13 - MOVV R10, (R1) - MOVV R11, 8(R1) - MOVV R12, 16(R1) - MOVV R13, 24(R1) - MOVV 32(R2), R6 - MOVV 40(R2), R7 - MOVV 48(R2), R8 - MOVV 56(R2), R9 - MOVV 32(R3), R10 - MOVV 40(R3), R11 - MOVV 48(R3), R12 - MOVV 56(R3), R13 - XOR R6, R10 - XOR R7, R11 - XOR R8, R12 - XOR R9, R13 - MOVV R10, 32(R1) - MOVV R11, 40(R1) - MOVV R12, 48(R1) - MOVV R13, 56(R1) - ADDV $64, R2 - ADDV $64, R3 - ADDV $64, R1 - SUBV $64, R4 - SGTU $64, R4, R5 - BEQ R0, R5, xor_64 - BEQ R0, R4, end - -xor_32_check: - SGTU $32, R4, R5 - BNE R5, xor_16_check -xor_32: - MOVV (R2), R6 - MOVV 8(R2), R7 - MOVV 16(R2), R8 - MOVV 24(R2), R9 - MOVV (R3), R10 - MOVV 8(R3), R11 - MOVV 16(R3), R12 - MOVV 24(R3), R13 - XOR R6, R10 - XOR R7, R11 - XOR R8, R12 - XOR R9, R13 - MOVV R10, (R1) - MOVV R11, 8(R1) - MOVV R12, 16(R1) - MOVV R13, 24(R1) - ADDV $32, R2 - ADDV $32, R3 - ADDV $32, R1 - SUBV $32, R4 - BEQ R0, R4, end - -xor_16_check: - SGTU $16, R4, R5 - BNE R5, xor_8_check -xor_16: - MOVV (R2), R6 - MOVV 8(R2), R7 - MOVV (R3), R8 - MOVV 8(R3), R9 - XOR R6, R8 - XOR R7, R9 - MOVV R8, (R1) - MOVV R9, 8(R1) - ADDV $16, R2 - ADDV $16, R3 - ADDV $16, R1 - SUBV $16, R4 - BEQ R0, R4, end - -xor_8_check: - SGTU $8, R4, R5 - BNE R5, xor_4_check -xor_8: - MOVV (R2), R6 - MOVV (R3), R7 - XOR R6, R7 - MOVV R7, (R1) - ADDV $8, R1 - ADDV $8, R2 - ADDV $8, R3 - SUBV $8, R4 - BEQ R0, R4, end - -xor_4_check: - SGTU $4, R4, R5 - BNE R5, xor_2_check -xor_4: - MOVW (R2), R6 - MOVW (R3), R7 - XOR R6, R7 - MOVW R7, (R1) - ADDV $4, R2 - ADDV $4, R3 - ADDV $4, R1 - SUBV $4, R4 - BEQ R0, R4, end - -xor_2_check: - SGTU $2, R4, R5 - BNE R5, xor_1 -xor_2: - MOVH (R2), R6 - MOVH (R3), R7 - XOR R6, R7 - MOVH R7, (R1) - ADDV $2, R2 - ADDV $2, R3 - ADDV $2, R1 - SUBV $2, R4 - BEQ R0, R4, end - -xor_1: - MOVB (R2), R6 - MOVB (R3), R7 - XOR R6, R7 - MOVB R7, (R1) - -end: - RET diff --git a/src/crypto/internal/fips140/subtle/xor_mipsx.s b/src/crypto/internal/fips140/subtle/xor_mipsx.s deleted file mode 100644 index 1a6b3f409dddc9..00000000000000 --- a/src/crypto/internal/fips140/subtle/xor_mipsx.s +++ /dev/null @@ -1,212 +0,0 @@ -// Copyright 2025 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
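// A portable sketch of the fallback that now covers the mips targets after
// the assembly above is removed: the generic xorBytes is a plain byte loop.
// (The real xor_generic.go also processes word-sized chunks; this is the
// simplest correct form, shown only to illustrate the operation.)
package main

import "fmt"

func xorBytes(dst, a, b []byte) {
	n := min(len(a), len(b))
	for i := 0; i < n; i++ {
		dst[i] = a[i] ^ b[i]
	}
}

func main() {
	a := []byte{0x0f, 0xf0, 0xaa}
	b := []byte{0xff, 0x0f, 0x55}
	dst := make([]byte, 3)
	xorBytes(dst, a, b)
	fmt.Printf("%x\n", dst) // f0ffff
}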
- -//go:build (mips || mipsle) && !purego - -#include "textflag.h" - -// func xorBytes(dst, a, b *byte, n int) -TEXT ·xorBytes(SB), NOSPLIT|NOFRAME, $0 - MOVW dst+0(FP), R1 - MOVW a+4(FP), R2 - MOVW b+8(FP), R3 - MOVW n+12(FP), R4 - - SGTU $64, R4, R5 // R5 = 1 if (64 > R4) - BNE R5, xor_32_check -xor_64: - MOVW (R2), R6 - MOVW 4(R2), R7 - MOVW 8(R2), R8 - MOVW 12(R2), R9 - MOVW (R3), R10 - MOVW 4(R3), R11 - MOVW 8(R3), R12 - MOVW 12(R3), R13 - XOR R6, R10 - XOR R7, R11 - XOR R8, R12 - XOR R9, R13 - MOVW R10, (R1) - MOVW R11, 4(R1) - MOVW R12, 8(R1) - MOVW R13, 12(R1) - MOVW 16(R2), R6 - MOVW 20(R2), R7 - MOVW 24(R2), R8 - MOVW 28(R2), R9 - MOVW 16(R3), R10 - MOVW 20(R3), R11 - MOVW 24(R3), R12 - MOVW 28(R3), R13 - XOR R6, R10 - XOR R7, R11 - XOR R8, R12 - XOR R9, R13 - MOVW R10, 16(R1) - MOVW R11, 20(R1) - MOVW R12, 24(R1) - MOVW R13, 28(R1) - MOVW 32(R2), R6 - MOVW 36(R2), R7 - MOVW 40(R2), R8 - MOVW 44(R2), R9 - MOVW 32(R3), R10 - MOVW 36(R3), R11 - MOVW 40(R3), R12 - MOVW 44(R3), R13 - XOR R6, R10 - XOR R7, R11 - XOR R8, R12 - XOR R9, R13 - MOVW R10, 32(R1) - MOVW R11, 36(R1) - MOVW R12, 40(R1) - MOVW R13, 44(R1) - MOVW 48(R2), R6 - MOVW 52(R2), R7 - MOVW 56(R2), R8 - MOVW 60(R2), R9 - MOVW 48(R3), R10 - MOVW 52(R3), R11 - MOVW 56(R3), R12 - MOVW 60(R3), R13 - XOR R6, R10 - XOR R7, R11 - XOR R8, R12 - XOR R9, R13 - MOVW R10, 48(R1) - MOVW R11, 52(R1) - MOVW R12, 56(R1) - MOVW R13, 60(R1) - ADD $64, R2 - ADD $64, R3 - ADD $64, R1 - SUB $64, R4 - SGTU $64, R4, R5 - BEQ R0, R5, xor_64 - BEQ R0, R4, end - -xor_32_check: - SGTU $32, R4, R5 - BNE R5, xor_16_check -xor_32: - MOVW (R2), R6 - MOVW 4(R2), R7 - MOVW 8(R2), R8 - MOVW 12(R2), R9 - MOVW (R3), R10 - MOVW 4(R3), R11 - MOVW 8(R3), R12 - MOVW 12(R3), R13 - XOR R6, R10 - XOR R7, R11 - XOR R8, R12 - XOR R9, R13 - MOVW R10, (R1) - MOVW R11, 4(R1) - MOVW R12, 8(R1) - MOVW R13, 12(R1) - MOVW 16(R2), R6 - MOVW 20(R2), R7 - MOVW 24(R2), R8 - MOVW 28(R2), R9 - MOVW 16(R3), R10 - MOVW 20(R3), R11 - MOVW 24(R3), R12 - MOVW 28(R3), R13 - XOR R6, R10 - XOR R7, R11 - XOR R8, R12 - XOR R9, R13 - MOVW R10, 16(R1) - MOVW R11, 20(R1) - MOVW R12, 24(R1) - MOVW R13, 28(R1) - ADD $32, R2 - ADD $32, R3 - ADD $32, R1 - SUB $32, R4 - BEQ R0, R4, end - -xor_16_check: - SGTU $16, R4, R5 - BNE R5, xor_8_check -xor_16: - MOVW (R2), R6 - MOVW 4(R2), R7 - MOVW 8(R2), R8 - MOVW 12(R2), R9 - MOVW (R3), R10 - MOVW 4(R3), R11 - MOVW 8(R3), R12 - MOVW 12(R3), R13 - XOR R6, R10 - XOR R7, R11 - XOR R8, R12 - XOR R9, R13 - MOVW R10, (R1) - MOVW R11, 4(R1) - MOVW R12, 8(R1) - MOVW R13, 12(R1) - ADD $16, R2 - ADD $16, R3 - ADD $16, R1 - SUB $16, R4 - BEQ R0, R4, end - -xor_8_check: - SGTU $8, R4, R5 - BNE R5, xor_4_check -xor_8: - MOVW (R2), R6 - MOVW 4(R2), R7 - MOVW (R3), R8 - MOVW 4(R3), R9 - XOR R6, R8 - XOR R7, R9 - MOVW R8, (R1) - MOVW R9, 4(R1) - ADD $8, R1 - ADD $8, R2 - ADD $8, R3 - SUB $8, R4 - BEQ R0, R4, end - -xor_4_check: - SGTU $4, R4, R5 - BNE R5, xor_2_check -xor_4: - MOVW (R2), R6 - MOVW (R3), R7 - XOR R6, R7 - MOVW R7, (R1) - ADD $4, R2 - ADD $4, R3 - ADD $4, R1 - SUB $4, R4 - BEQ R0, R4, end - -xor_2_check: - SGTU $2, R4, R5 - BNE R5, xor_1 -xor_2: - MOVH (R2), R6 - MOVH (R3), R7 - XOR R6, R7 - MOVH R7, (R1) - ADD $2, R2 - ADD $2, R3 - ADD $2, R1 - SUB $2, R4 - BEQ R0, R4, end - -xor_1: - MOVB (R2), R6 - MOVB (R3), R7 - XOR R6, R7 - MOVB R7, (R1) - -end: - RET diff --git a/src/crypto/internal/fips140test/acvp_test.go b/src/crypto/internal/fips140test/acvp_test.go index 5871bde8be4b2b..47a42cce1bcc22 100644 --- 
a/src/crypto/internal/fips140test/acvp_test.go +++ b/src/crypto/internal/fips140test/acvp_test.go @@ -1624,7 +1624,7 @@ func cmdHmacDrbgAft(h func() hash.Hash) command { // * Uninstantiate // See Table 7 in draft-vassilev-acvp-drbg out := make([]byte, outLen) - drbg := ecdsa.TestingOnlyNewDRBG(func() fips140.Hash { return h() }, entropy, nonce, personalization) + drbg := ecdsa.TestingOnlyNewDRBG(h, entropy, nonce, personalization) drbg.Generate(out) drbg.Generate(out) diff --git a/src/crypto/internal/fips140test/cast_test.go b/src/crypto/internal/fips140test/cast_test.go index 1818b583d5ce2d..14ab72d1713a72 100644 --- a/src/crypto/internal/fips140test/cast_test.go +++ b/src/crypto/internal/fips140test/cast_test.go @@ -5,9 +5,9 @@ package fipstest import ( + "crypto" + "crypto/internal/fips140" "crypto/rand" - "crypto/x509" - "encoding/pem" "fmt" "internal/testenv" "io/fs" @@ -50,8 +50,6 @@ var allCASTs = []string{ "KAS-ECC-SSC P-256", "ML-KEM PCT", "ML-KEM PCT", - "ML-KEM PCT", - "ML-KEM PCT", "ML-KEM-768", "PBKDF2", "RSA sign and verify PCT", @@ -107,60 +105,65 @@ func TestAllCASTs(t *testing.T) { // TestConditionals causes the conditional CASTs and PCTs to be invoked. func TestConditionals(t *testing.T) { mlkem.GenerateKey768() - k, err := ecdh.GenerateKey(ecdh.P256(), rand.Reader) + kDH, err := ecdh.GenerateKey(ecdh.P256(), rand.Reader) if err != nil { - t.Fatal(err) + t.Error(err) + } else { + ecdh.ECDH(ecdh.P256(), kDH, kDH.PublicKey()) } - ecdh.ECDH(ecdh.P256(), k, k.PublicKey()) kDSA, err := ecdsa.GenerateKey(ecdsa.P256(), rand.Reader) if err != nil { - t.Fatal(err) + t.Error(err) + } else { + ecdsa.SignDeterministic(ecdsa.P256(), sha256.New, kDSA, make([]byte, 32)) } - ecdsa.SignDeterministic(ecdsa.P256(), sha256.New, kDSA, make([]byte, 32)) k25519, err := ed25519.GenerateKey() if err != nil { - t.Fatal(err) + t.Error(err) + } else { + ed25519.Sign(k25519, make([]byte, 32)) } - ed25519.Sign(k25519, make([]byte, 32)) - rsa.VerifyPKCS1v15(&rsa.PublicKey{}, "", nil, nil) - // Parse an RSA key to hit the PCT rather than generating one (which is slow). 
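// A user-level analogue using crypto/ecdh from the standard library, not the
// internal fips140 package that TestConditionals exercises below, of the ECDH
// exchange whose key generation triggers the "ECDH PCT": both sides must
// derive the same shared secret.
package main

import (
	"bytes"
	"crypto/ecdh"
	"crypto/rand"
	"fmt"
)

func main() {
	alice, err := ecdh.P256().GenerateKey(rand.Reader)
	if err != nil {
		panic(err)
	}
	bob, err := ecdh.P256().GenerateKey(rand.Reader)
	if err != nil {
		panic(err)
	}
	s1, err := alice.ECDH(bob.PublicKey())
	if err != nil {
		panic(err)
	}
	s2, err := bob.ECDH(alice.PublicKey())
	if err != nil {
		panic(err)
	}
	fmt.Println("shared secrets match:", bytes.Equal(s1, s2))
}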
- block, _ := pem.Decode([]byte(strings.ReplaceAll( - `-----BEGIN RSA TESTING KEY----- -MIIEowIBAAKCAQEAsPnoGUOnrpiSqt4XynxA+HRP7S+BSObI6qJ7fQAVSPtRkqso -tWxQYLEYzNEx5ZSHTGypibVsJylvCfuToDTfMul8b/CZjP2Ob0LdpYrNH6l5hvFE -89FU1nZQF15oVLOpUgA7wGiHuEVawrGfey92UE68mOyUVXGweJIVDdxqdMoPvNNU -l86BU02vlBiESxOuox+dWmuVV7vfYZ79Toh/LUK43YvJh+rhv4nKuF7iHjVjBd9s -B6iDjj70HFldzOQ9r8SRI+9NirupPTkF5AKNe6kUhKJ1luB7S27ZkvB3tSTT3P59 -3VVJvnzOjaA1z6Cz+4+eRvcysqhrRgFlwI9TEwIDAQABAoIBAEEYiyDP29vCzx/+ -dS3LqnI5BjUuJhXUnc6AWX/PCgVAO+8A+gZRgvct7PtZb0sM6P9ZcLrweomlGezI -FrL0/6xQaa8bBr/ve/a8155OgcjFo6fZEw3Dz7ra5fbSiPmu4/b/kvrg+Br1l77J -aun6uUAs1f5B9wW+vbR7tzbT/mxaUeDiBzKpe15GwcvbJtdIVMa2YErtRjc1/5B2 -BGVXyvlJv0SIlcIEMsHgnAFOp1ZgQ08aDzvilLq8XVMOahAhP1O2A3X8hKdXPyrx -IVWE9bS9ptTo+eF6eNl+d7htpKGEZHUxinoQpWEBTv+iOoHsVunkEJ3vjLP3lyI/ -fY0NQ1ECgYEA3RBXAjgvIys2gfU3keImF8e/TprLge1I2vbWmV2j6rZCg5r/AS0u -pii5CvJ5/T5vfJPNgPBy8B/yRDs+6PJO1GmnlhOkG9JAIPkv0RBZvR0PMBtbp6nT -Y3yo1lwamBVBfY6rc0sLTzosZh2aGoLzrHNMQFMGaauORzBFpY5lU50CgYEAzPHl -u5DI6Xgep1vr8QvCUuEesCOgJg8Yh1UqVoY/SmQh6MYAv1I9bLGwrb3WW/7kqIoD -fj0aQV5buVZI2loMomtU9KY5SFIsPV+JuUpy7/+VE01ZQM5FdY8wiYCQiVZYju9X -Wz5LxMNoz+gT7pwlLCsC4N+R8aoBk404aF1gum8CgYAJ7VTq7Zj4TFV7Soa/T1eE -k9y8a+kdoYk3BASpCHJ29M5R2KEA7YV9wrBklHTz8VzSTFTbKHEQ5W5csAhoL5Fo -qoHzFFi3Qx7MHESQb9qHyolHEMNx6QdsHUn7rlEnaTTyrXh3ifQtD6C0yTmFXUIS -CW9wKApOrnyKJ9nI0HcuZQKBgQCMtoV6e9VGX4AEfpuHvAAnMYQFgeBiYTkBKltQ -XwozhH63uMMomUmtSG87Sz1TmrXadjAhy8gsG6I0pWaN7QgBuFnzQ/HOkwTm+qKw -AsrZt4zeXNwsH7QXHEJCFnCmqw9QzEoZTrNtHJHpNboBuVnYcoueZEJrP8OnUG3r -UjmopwKBgAqB2KYYMUqAOvYcBnEfLDmyZv9BTVNHbR2lKkMYqv5LlvDaBxVfilE0 -2riO4p6BaAdvzXjKeRrGNEKoHNBpOSfYCOM16NjL8hIZB1CaV3WbT5oY+jp7Mzd5 -7d56RZOE+ERK2uz/7JX9VSsM/LbH9pJibd4e8mikDS9ntciqOH/3 ------END RSA TESTING KEY-----`, "TESTING KEY", "PRIVATE KEY"))) - if _, err := x509.ParsePKCS1PrivateKey(block.Bytes); err != nil { - t.Fatal(err) + kRSA, err := rsa.GenerateKey(rand.Reader, 2048) + if err != nil { + t.Error(err) + } else { + rsa.SignPKCS1v15(kRSA, crypto.SHA256.String(), make([]byte, 32)) } t.Log("completed successfully") } +func TestCASTPasses(t *testing.T) { + moduleStatus(t) + testenv.MustHaveExec(t) + if err := fips140.Supported(); err != nil { + t.Skipf("test requires FIPS 140 mode: %v", err) + } + + cmd := testenv.Command(t, testenv.Executable(t), "-test.run=^TestConditionals$", "-test.v") + cmd.Env = append(cmd.Env, "GODEBUG=fips140=debug") + out, err := cmd.CombinedOutput() + t.Logf("%s", out) + if err != nil || !strings.Contains(string(out), "completed successfully") { + t.Errorf("TestConditionals did not complete successfully") + } + + for _, name := range allCASTs { + t.Run(name, func(t *testing.T) { + if !strings.Contains(string(out), fmt.Sprintf("passed: %s\n", name)) { + t.Errorf("CAST/PCT %s success was not logged", name) + } else { + t.Logf("CAST/PCT succeeded: %s", name) + } + }) + } +} + func TestCASTFailures(t *testing.T) { moduleStatus(t) testenv.MustHaveExec(t) + if err := fips140.Supported(); err != nil { + t.Skipf("test requires FIPS 140 mode: %v", err) + } for _, name := range allCASTs { t.Run(name, func(t *testing.T) { @@ -169,7 +172,6 @@ func TestCASTFailures(t *testing.T) { if !testing.Verbose() { t.Parallel() } - t.Logf("CAST/PCT succeeded: %s", name) t.Logf("Testing CAST/PCT failure...") cmd := testenv.Command(t, testenv.Executable(t), "-test.run=^TestConditionals$", "-test.v") cmd.Env = append(cmd.Env, fmt.Sprintf("GODEBUG=failfipscast=%s,fips140=on", name)) @@ -180,6 +182,8 @@ func TestCASTFailures(t *testing.T) { } if strings.Contains(string(out), 
"completed successfully") { t.Errorf("CAST/PCT %s failure did not stop the program", name) + } else if !strings.Contains(string(out), "self-test failed: "+name) { + t.Errorf("CAST/PCT %s failure did not log the expected message", name) } else { t.Logf("CAST/PCT %s failed as expected and caused the program to exit", name) } diff --git a/src/crypto/internal/fips140test/fips_test.go b/src/crypto/internal/fips140test/fips_test.go index 08d60933ef38b8..52fc9d34886003 100644 --- a/src/crypto/internal/fips140test/fips_test.go +++ b/src/crypto/internal/fips140test/fips_test.go @@ -74,11 +74,9 @@ func TestVersion(t *testing.T) { continue } exp := setting.Value - if exp == "v1.0.0" { - // Unfortunately we enshrined the version of the first module as - // v1.0 before deciding to go for full versions. - exp = "v1.0" - } + // Remove the -hash suffix, if any. + // The version from fips140.Version omits it. + exp, _, _ = strings.Cut(exp, "-") if v := fips140.Version(); v != exp { t.Errorf("Version is %q, expected %q", v, exp) } diff --git a/src/crypto/tls/handshake_server.go b/src/crypto/tls/handshake_server.go index 1e0b5f06672d15..088c66fadb2a44 100644 --- a/src/crypto/tls/handshake_server.go +++ b/src/crypto/tls/handshake_server.go @@ -357,7 +357,7 @@ func negotiateALPN(serverProtos, clientProtos []string, quic bool) (string, erro if http11fallback { return "", nil } - return "", fmt.Errorf("tls: client requested unsupported application protocols (%s)", clientProtos) + return "", fmt.Errorf("tls: client requested unsupported application protocols (%q)", clientProtos) } // supportsECDHE returns whether ECDHE key exchanges can be used with this diff --git a/src/crypto/x509/name_constraints_test.go b/src/crypto/x509/name_constraints_test.go index a5851845164d10..bc91b28401fce5 100644 --- a/src/crypto/x509/name_constraints_test.go +++ b/src/crypto/x509/name_constraints_test.go @@ -1624,6 +1624,40 @@ var nameConstraintsTests = []nameConstraintsTest{ }, expectedError: "URI with IP", }, + // #87: subdomain excluded constraints preclude wildcard names + { + roots: []constraintsSpec{ + { + bad: []string{"dns:foo.example.com"}, + }, + }, + intermediates: [][]constraintsSpec{ + { + {}, + }, + }, + leaf: leafSpec{ + sans: []string{"dns:*.example.com"}, + }, + expectedError: "\"*.example.com\" is excluded by constraint \"foo.example.com\"", + }, + // #88: wildcard names are not matched by subdomain permitted constraints + { + roots: []constraintsSpec{ + { + ok: []string{"dns:foo.example.com"}, + }, + }, + intermediates: [][]constraintsSpec{ + { + {}, + }, + }, + leaf: leafSpec{ + sans: []string{"dns:*.example.com"}, + }, + expectedError: "\"*.example.com\" is not permitted", + }, } func makeConstraintsCACert(constraints constraintsSpec, name string, key *ecdsa.PrivateKey, parent *Certificate, parentKey *ecdsa.PrivateKey) (*Certificate, error) { diff --git a/src/crypto/x509/parser.go b/src/crypto/x509/parser.go index 4abcc1b7b590e2..680dcee203a828 100644 --- a/src/crypto/x509/parser.go +++ b/src/crypto/x509/parser.go @@ -429,10 +429,8 @@ func parseSANExtension(der cryptobyte.String) (dnsNames, emailAddresses []string if err != nil { return fmt.Errorf("x509: cannot parse URI %q: %s", uriStr, err) } - if len(uri.Host) > 0 { - if _, ok := domainToReverseLabels(uri.Host); !ok { - return fmt.Errorf("x509: cannot parse URI %q: invalid domain", uriStr) - } + if len(uri.Host) > 0 && !domainNameValid(uri.Host, false) { + return fmt.Errorf("x509: cannot parse URI %q: invalid domain", uriStr) } uris = append(uris, uri) case 
nameTypeIP: @@ -598,15 +596,7 @@ func parseNameConstraintsExtension(out *Certificate, e pkix.Extension) (unhandle return nil, nil, nil, nil, errors.New("x509: invalid constraint value: " + err.Error()) } - trimmedDomain := domain - if len(trimmedDomain) > 0 && trimmedDomain[0] == '.' { - // constraints can have a leading - // period to exclude the domain - // itself, but that's not valid in a - // normal domain name. - trimmedDomain = trimmedDomain[1:] - } - if _, ok := domainToReverseLabels(trimmedDomain); !ok { + if !domainNameValid(domain, true) { return nil, nil, nil, nil, fmt.Errorf("x509: failed to parse dnsName constraint %q", domain) } dnsNames = append(dnsNames, domain) @@ -647,12 +637,7 @@ func parseNameConstraintsExtension(out *Certificate, e pkix.Extension) (unhandle return nil, nil, nil, nil, fmt.Errorf("x509: failed to parse rfc822Name constraint %q", constraint) } } else { - // Otherwise it's a domain name. - domain := constraint - if len(domain) > 0 && domain[0] == '.' { - domain = domain[1:] - } - if _, ok := domainToReverseLabels(domain); !ok { + if !domainNameValid(constraint, true) { return nil, nil, nil, nil, fmt.Errorf("x509: failed to parse rfc822Name constraint %q", constraint) } } @@ -668,15 +653,7 @@ func parseNameConstraintsExtension(out *Certificate, e pkix.Extension) (unhandle return nil, nil, nil, nil, fmt.Errorf("x509: failed to parse URI constraint %q: cannot be IP address", domain) } - trimmedDomain := domain - if len(trimmedDomain) > 0 && trimmedDomain[0] == '.' { - // constraints can have a leading - // period to exclude the domain itself, - // but that's not valid in a normal - // domain name. - trimmedDomain = trimmedDomain[1:] - } - if _, ok := domainToReverseLabels(trimmedDomain); !ok { + if !domainNameValid(domain, true) { return nil, nil, nil, nil, fmt.Errorf("x509: failed to parse URI constraint %q", domain) } uriDomains = append(uriDomains, domain) @@ -1317,3 +1294,62 @@ func ParseRevocationList(der []byte) (*RevocationList, error) { return rl, nil } + +// domainNameValid is an alloc-less version of the checks that +// domainToReverseLabels does. +func domainNameValid(s string, constraint bool) bool { + // TODO(#75835): This function omits a number of checks which we + // really should be doing to enforce that domain names are valid names per + // RFC 1034. We previously enabled these checks, but this broke a + // significant number of certificates we previously considered valid, and we + // happily create via CreateCertificate (et al). We should enable these + // checks, but will need to gate them behind a GODEBUG. + // + // I have left the checks we previously enabled, noted with "TODO(#75835)" so + // that we can easily re-enable them once we unbreak everyone. + + // TODO(#75835): this should only be true for constraints. + if len(s) == 0 { + return true + } + + // Do not allow trailing period (FQDN format is not allowed in SANs or + // constraints). + if s[len(s)-1] == '.' { + return false + } + + // TODO(#75835): domains must have at least one label, cannot have + // a leading empty label, and cannot be longer than 253 characters. + // if len(s) == 0 || (!constraint && s[0] == '.') || len(s) > 253 { + // return false + // } + + lastDot := -1 + if constraint && s[0] == '.' { + s = s[1:] + } + + for i := 0; i <= len(s); i++ { + if i < len(s) && (s[i] < 33 || s[i] > 126) { + // Invalid character. + return false + } + if i == len(s) || s[i] == '.' 
{ + labelLen := i + if lastDot >= 0 { + labelLen -= lastDot + 1 + } + if labelLen == 0 { + return false + } + // TODO(#75835): labels cannot be longer than 63 characters. + // if labelLen > 63 { + // return false + // } + lastDot = i + } + } + + return true +} diff --git a/src/crypto/x509/parser_test.go b/src/crypto/x509/parser_test.go index 3b9d9aed826b01..d53b805b786990 100644 --- a/src/crypto/x509/parser_test.go +++ b/src/crypto/x509/parser_test.go @@ -5,9 +5,13 @@ package x509 import ( + "crypto/ecdsa" + "crypto/elliptic" + "crypto/rand" "encoding/asn1" "encoding/pem" "os" + "strings" "testing" cryptobyte_asn1 "golang.org/x/crypto/cryptobyte/asn1" @@ -251,3 +255,106 @@ d5l1tRhScKu2NBgm74nYmJxJYgvuTA38wGhRrGU= } } } + +func TestDomainNameValid(t *testing.T) { + for _, tc := range []struct { + name string + dnsName string + constraint bool + valid bool + }{ + // TODO(#75835): these tests are for stricter name validation, which we + // had to disable. Once we reenable these strict checks, behind a + // GODEBUG, we should add them back in. + // {"empty name, name", "", false, false}, + // {"254 char label, name", strings.Repeat("a.a", 84) + "aaa", false, false}, + // {"254 char label, constraint", strings.Repeat("a.a", 84) + "aaa", true, false}, + // {"253 char label, name", strings.Repeat("a.a", 84) + "aa", false, false}, + // {"253 char label, constraint", strings.Repeat("a.a", 84) + "aa", true, false}, + // {"64 char single label, name", strings.Repeat("a", 64), false, false}, + // {"64 char single label, constraint", strings.Repeat("a", 64), true, false}, + // {"64 char label, name", "a." + strings.Repeat("a", 64), false, false}, + // {"64 char label, constraint", "a." + strings.Repeat("a", 64), true, false}, + + // TODO(#75835): these are the inverse of the tests above, they should be removed + // once the strict checking is enabled. + {"254 char label, name", strings.Repeat("a.a", 84) + "aaa", false, true}, + {"254 char label, constraint", strings.Repeat("a.a", 84) + "aaa", true, true}, + {"253 char label, name", strings.Repeat("a.a", 84) + "aa", false, true}, + {"253 char label, constraint", strings.Repeat("a.a", 84) + "aa", true, true}, + {"64 char single label, name", strings.Repeat("a", 64), false, true}, + {"64 char single label, constraint", strings.Repeat("a", 64), true, true}, + {"64 char label, name", "a." + strings.Repeat("a", 64), false, true}, + {"64 char label, constraint", "a." + strings.Repeat("a", 64), true, true}, + + // Check we properly enforce properties of domain names. + {"empty name, constraint", "", true, true}, + {"empty label, name", "a..a", false, false}, + {"empty label, constraint", "a..a", true, false}, + {"period, name", ".", false, false}, + {"period, constraint", ".", true, false}, // TODO(roland): not entirely clear if this is a valid constraint (require at least one label?) + {"valid, name", "a.b.c", false, true}, + {"valid, constraint", "a.b.c", true, true}, + {"leading period, name", ".a.b.c", false, false}, + {"leading period, constraint", ".a.b.c", true, true}, + {"trailing period, name", "a.", false, false}, + {"trailing period, constraint", "a.", true, false}, + {"bare label, name", "a", false, true}, + {"bare label, constraint", "a", true, true}, + {"63 char single label, name", strings.Repeat("a", 63), false, true}, + {"63 char single label, constraint", strings.Repeat("a", 63), true, true}, + {"63 char label, name", "a." + strings.Repeat("a", 63), false, true}, + {"63 char label, constraint", "a." 
+ strings.Repeat("a", 63), true, true}, + } { + t.Run(tc.name, func(t *testing.T) { + valid := domainNameValid(tc.dnsName, tc.constraint) + if tc.valid != valid { + t.Errorf("domainNameValid(%q, %t) = %v; want %v", tc.dnsName, tc.constraint, !tc.valid, tc.valid) + } + // Also check that we enforce the same properties as domainToReverseLabels + trimmedName := tc.dnsName + if tc.constraint && len(trimmedName) > 1 && trimmedName[0] == '.' { + trimmedName = trimmedName[1:] + } + _, revValid := domainToReverseLabels(trimmedName) + if valid != revValid { + t.Errorf("domainNameValid(%q, %t) = %t != domainToReverseLabels(%q) = %t", tc.dnsName, tc.constraint, valid, trimmedName, revValid) + } + }) + } +} + +func TestRoundtripWeirdSANs(t *testing.T) { + // TODO(#75835): check that certificates we create with CreateCertificate that have malformed SAN values + // can be parsed by ParseCertificate. We should eventually restrict this, but for now we have to maintain + // this property as people have been relying on it. + k, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + if err != nil { + t.Fatal(err) + } + badNames := []string{ + "baredomain", + "baredomain.", + strings.Repeat("a", 255), + strings.Repeat("a", 65) + ".com", + } + tmpl := &Certificate{ + EmailAddresses: badNames, + DNSNames: badNames, + } + b, err := CreateCertificate(rand.Reader, tmpl, tmpl, &k.PublicKey, k) + if err != nil { + t.Fatal(err) + } + _, err = ParseCertificate(b) + if err != nil { + t.Fatalf("Couldn't roundtrip certificate: %v", err) + } +} + +func FuzzDomainNameValid(f *testing.F) { + f.Fuzz(func(t *testing.T, data string) { + domainNameValid(data, false) + domainNameValid(data, true) + }) +} diff --git a/src/crypto/x509/verify.go b/src/crypto/x509/verify.go index 7cc0fb2e3e0385..3de9f93b2c4b16 100644 --- a/src/crypto/x509/verify.go +++ b/src/crypto/x509/verify.go @@ -110,31 +110,38 @@ type HostnameError struct { func (h HostnameError) Error() string { c := h.Certificate + maxNamesIncluded := 100 if !c.hasSANExtension() && matchHostnames(c.Subject.CommonName, h.Host) { return "x509: certificate relies on legacy Common Name field, use SANs instead" } - var valid string + var valid strings.Builder if ip := net.ParseIP(h.Host); ip != nil { // Trying to validate an IP if len(c.IPAddresses) == 0 { return "x509: cannot validate certificate for " + h.Host + " because it doesn't contain any IP SANs" } + if len(c.IPAddresses) >= maxNamesIncluded { + return fmt.Sprintf("x509: certificate is valid for %d IP SANs, but none matched %s", len(c.IPAddresses), h.Host) + } for _, san := range c.IPAddresses { - if len(valid) > 0 { - valid += ", " + if valid.Len() > 0 { + valid.WriteString(", ") } - valid += san.String() + valid.WriteString(san.String()) } } else { - valid = strings.Join(c.DNSNames, ", ") + if len(c.DNSNames) >= maxNamesIncluded { + return fmt.Sprintf("x509: certificate is valid for %d names, but none matched %s", len(c.DNSNames), h.Host) + } + valid.WriteString(strings.Join(c.DNSNames, ", ")) } - if len(valid) == 0 { + if valid.Len() == 0 { return "x509: certificate is not valid for any names, but wanted to match " + h.Host } - return "x509: certificate is valid for " + valid + ", not " + h.Host + return "x509: certificate is valid for " + valid.String() + ", not " + h.Host } // UnknownAuthorityError results when the certificate issuer is unknown @@ -391,6 +398,7 @@ func parseRFC2821Mailbox(in string) (mailbox rfc2821Mailbox, ok bool) { // domainToReverseLabels converts a textual domain name like foo.example.com to 
// the list of labels in reverse order, e.g. ["com", "example", "foo"]. func domainToReverseLabels(domain string) (reverseLabels []string, ok bool) { + reverseLabels = make([]string, 0, strings.Count(domain, ".")+1) for len(domain) > 0 { if i := strings.LastIndexByte(domain, '.'); i == -1 { reverseLabels = append(reverseLabels, domain) @@ -428,7 +436,7 @@ func domainToReverseLabels(domain string) (reverseLabels []string, ok bool) { return reverseLabels, true } -func matchEmailConstraint(mailbox rfc2821Mailbox, constraint string) (bool, error) { +func matchEmailConstraint(mailbox rfc2821Mailbox, constraint string, excluded bool, reversedDomainsCache map[string][]string, reversedConstraintsCache map[string][]string) (bool, error) { // If the constraint contains an @, then it specifies an exact mailbox // name. if strings.Contains(constraint, "@") { @@ -441,10 +449,10 @@ func matchEmailConstraint(mailbox rfc2821Mailbox, constraint string) (bool, erro // Otherwise the constraint is like a DNS constraint of the domain part // of the mailbox. - return matchDomainConstraint(mailbox.domain, constraint) + return matchDomainConstraint(mailbox.domain, constraint, excluded, reversedDomainsCache, reversedConstraintsCache) } -func matchURIConstraint(uri *url.URL, constraint string) (bool, error) { +func matchURIConstraint(uri *url.URL, constraint string, excluded bool, reversedDomainsCache map[string][]string, reversedConstraintsCache map[string][]string) (bool, error) { // From RFC 5280, Section 4.2.1.10: // “a uniformResourceIdentifier that does not include an authority // component with a host name specified as a fully qualified domain @@ -473,7 +481,7 @@ func matchURIConstraint(uri *url.URL, constraint string) (bool, error) { return false, fmt.Errorf("URI with IP (%q) cannot be matched against constraints", uri.String()) } - return matchDomainConstraint(host, constraint) + return matchDomainConstraint(host, constraint, excluded, reversedDomainsCache, reversedConstraintsCache) } func matchIPConstraint(ip net.IP, constraint *net.IPNet) (bool, error) { @@ -490,16 +498,26 @@ func matchIPConstraint(ip net.IP, constraint *net.IPNet) (bool, error) { return true, nil } -func matchDomainConstraint(domain, constraint string) (bool, error) { +func matchDomainConstraint(domain, constraint string, excluded bool, reversedDomainsCache map[string][]string, reversedConstraintsCache map[string][]string) (bool, error) { // The meaning of zero length constraints is not specified, but this // code follows NSS and accepts them as matching everything. 
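	// For illustration (ignoring the excluded flag and the cache maps added
	// above, which only memoize domainToReverseLabels results), the rules
	// implemented below work out to:
	//
	//   matchDomainConstraint("www.example.com", "example.com")  == true
	//   matchDomainConstraint("www.example.com", ".example.com") == true
	//   matchDomainConstraint("example.com", ".example.com")     == false
	//   matchDomainConstraint("anything.test", "")               == true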
if len(constraint) == 0 { return true, nil } - domainLabels, ok := domainToReverseLabels(domain) - if !ok { - return false, fmt.Errorf("x509: internal error: cannot parse domain %q", domain) + domainLabels, found := reversedDomainsCache[domain] + if !found { + var ok bool + domainLabels, ok = domainToReverseLabels(domain) + if !ok { + return false, fmt.Errorf("x509: internal error: cannot parse domain %q", domain) + } + reversedDomainsCache[domain] = domainLabels + } + + wildcardDomain := false + if len(domain) > 0 && domain[0] == '*' { + wildcardDomain = true } // RFC 5280 says that a leading period in a domain name means that at @@ -513,9 +531,14 @@ func matchDomainConstraint(domain, constraint string) (bool, error) { constraint = constraint[1:] } - constraintLabels, ok := domainToReverseLabels(constraint) - if !ok { - return false, fmt.Errorf("x509: internal error: cannot parse domain %q", constraint) + constraintLabels, found := reversedConstraintsCache[constraint] + if !found { + var ok bool + constraintLabels, ok = domainToReverseLabels(constraint) + if !ok { + return false, fmt.Errorf("x509: internal error: cannot parse domain %q", constraint) + } + reversedConstraintsCache[constraint] = constraintLabels } if len(domainLabels) < len(constraintLabels) || @@ -523,6 +546,11 @@ func matchDomainConstraint(domain, constraint string) (bool, error) { return false, nil } + if excluded && wildcardDomain && len(domainLabels) > 1 && len(constraintLabels) > 0 { + domainLabels = domainLabels[:len(domainLabels)-1] + constraintLabels = constraintLabels[:len(constraintLabels)-1] + } + for i, constraintLabel := range constraintLabels { if !strings.EqualFold(constraintLabel, domainLabels[i]) { return false, nil @@ -542,7 +570,7 @@ func (c *Certificate) checkNameConstraints(count *int, nameType string, name string, parsedName any, - match func(parsedName, constraint any) (match bool, err error), + match func(parsedName, constraint any, excluded bool) (match bool, err error), permitted, excluded any) error { excludedValue := reflect.ValueOf(excluded) @@ -554,7 +582,7 @@ func (c *Certificate) checkNameConstraints(count *int, for i := 0; i < excludedValue.Len(); i++ { constraint := excludedValue.Index(i).Interface() - match, err := match(parsedName, constraint) + match, err := match(parsedName, constraint, true) if err != nil { return CertificateInvalidError{c, CANotAuthorizedForThisName, err.Error()} } @@ -576,7 +604,7 @@ func (c *Certificate) checkNameConstraints(count *int, constraint := permittedValue.Index(i).Interface() var err error - if ok, err = match(parsedName, constraint); err != nil { + if ok, err = match(parsedName, constraint, false); err != nil { return CertificateInvalidError{c, CANotAuthorizedForThisName, err.Error()} } @@ -636,6 +664,19 @@ func (c *Certificate) isValid(certType int, currentChain []*Certificate, opts *V } } + // Each time we do constraint checking, we need to check the constraints in + // the current certificate against all of the names that preceded it. We + // reverse these names using domainToReverseLabels, which is a relatively + // expensive operation. Since we check each name against each constraint, + // this requires us to do N*C calls to domainToReverseLabels (where N is the + // total number of names that preceed the certificate, and C is the total + // number of constraints in the certificate). 
By caching the results of + // calling domainToReverseLabels, we can reduce that to N+C calls at the + // cost of keeping all of the parsed names and constraints in memory until + // we return from isValid. + reversedDomainsCache := map[string][]string{} + reversedConstraintsCache := map[string][]string{} + if (certType == intermediateCertificate || certType == rootCertificate) && c.hasNameConstraints() { toCheck := []*Certificate{} @@ -655,21 +696,21 @@ func (c *Certificate) isValid(certType int, currentChain []*Certificate, opts *V } if err := c.checkNameConstraints(&comparisonCount, maxConstraintComparisons, "email address", name, mailbox, - func(parsedName, constraint any) (bool, error) { - return matchEmailConstraint(parsedName.(rfc2821Mailbox), constraint.(string)) + func(parsedName, constraint any, excluded bool) (bool, error) { + return matchEmailConstraint(parsedName.(rfc2821Mailbox), constraint.(string), excluded, reversedDomainsCache, reversedConstraintsCache) }, c.PermittedEmailAddresses, c.ExcludedEmailAddresses); err != nil { return err } case nameTypeDNS: name := string(data) - if _, ok := domainToReverseLabels(name); !ok { + if !domainNameValid(name, false) { return fmt.Errorf("x509: cannot parse dnsName %q", name) } if err := c.checkNameConstraints(&comparisonCount, maxConstraintComparisons, "DNS name", name, name, - func(parsedName, constraint any) (bool, error) { - return matchDomainConstraint(parsedName.(string), constraint.(string)) + func(parsedName, constraint any, excluded bool) (bool, error) { + return matchDomainConstraint(parsedName.(string), constraint.(string), excluded, reversedDomainsCache, reversedConstraintsCache) }, c.PermittedDNSDomains, c.ExcludedDNSDomains); err != nil { return err } @@ -682,8 +723,8 @@ func (c *Certificate) isValid(certType int, currentChain []*Certificate, opts *V } if err := c.checkNameConstraints(&comparisonCount, maxConstraintComparisons, "URI", name, uri, - func(parsedName, constraint any) (bool, error) { - return matchURIConstraint(parsedName.(*url.URL), constraint.(string)) + func(parsedName, constraint any, excluded bool) (bool, error) { + return matchURIConstraint(parsedName.(*url.URL), constraint.(string), excluded, reversedDomainsCache, reversedConstraintsCache) }, c.PermittedURIDomains, c.ExcludedURIDomains); err != nil { return err } @@ -695,7 +736,7 @@ func (c *Certificate) isValid(certType int, currentChain []*Certificate, opts *V } if err := c.checkNameConstraints(&comparisonCount, maxConstraintComparisons, "IP address", ip.String(), ip, - func(parsedName, constraint any) (bool, error) { + func(parsedName, constraint any, _ bool) (bool, error) { return matchIPConstraint(parsedName.(net.IP), constraint.(*net.IPNet)) }, c.PermittedIPRanges, c.ExcludedIPRanges); err != nil { return err @@ -927,7 +968,10 @@ func alreadyInChain(candidate *Certificate, chain []*Certificate) bool { if !bytes.Equal(candidate.RawSubject, cert.RawSubject) { continue } - if !candidate.PublicKey.(pubKeyEqual).Equal(cert.PublicKey) { + // We enforce the canonical encoding of SPKI (by only allowing the + // correct AI paremeter encodings in parseCertificate), so it's safe to + // directly compare the raw bytes. 
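+		// (RawSubjectPublicKeyInfo holds the DER bytes of the SubjectPublicKeyInfo
+		// exactly as they appeared in the certificate, so two certificates carrying
+		// the same key under those canonical encodings compare equal here.)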
+ if !bytes.Equal(candidate.RawSubjectPublicKeyInfo, cert.RawSubjectPublicKeyInfo) { continue } var certSAN *pkix.Extension diff --git a/src/crypto/x509/verify_test.go b/src/crypto/x509/verify_test.go index 7991f49946d587..9a21218ee4b465 100644 --- a/src/crypto/x509/verify_test.go +++ b/src/crypto/x509/verify_test.go @@ -6,16 +6,20 @@ package x509 import ( "crypto" + "crypto/dsa" "crypto/ecdsa" "crypto/elliptic" "crypto/rand" + "crypto/rsa" "crypto/x509/pkix" "encoding/asn1" "encoding/pem" "errors" "fmt" "internal/testenv" + "log" "math/big" + "net" "os" "os/exec" "runtime" @@ -88,6 +92,26 @@ var verifyTests = []verifyTest{ errorCallback: expectHostnameError("certificate is valid for"), }, + { + name: "TooManyDNS", + leaf: generatePEMCertWithRepeatSAN(1677615892, 200, "fake.dns"), + roots: []string{generatePEMCertWithRepeatSAN(1677615892, 200, "fake.dns")}, + currentTime: 1677615892, + dnsName: "www.example.com", + systemSkip: true, // does not chain to a system root + + errorCallback: expectHostnameError("certificate is valid for 200 names, but none matched"), + }, + { + name: "TooManyIPs", + leaf: generatePEMCertWithRepeatSAN(1677615892, 150, "4.3.2.1"), + roots: []string{generatePEMCertWithRepeatSAN(1677615892, 150, "4.3.2.1")}, + currentTime: 1677615892, + dnsName: "1.2.3.4", + systemSkip: true, // does not chain to a system root + + errorCallback: expectHostnameError("certificate is valid for 150 IP SANs, but none matched"), + }, { name: "IPMissing", leaf: googleLeaf, @@ -551,6 +575,30 @@ func nameToKey(name *pkix.Name) string { return strings.Join(name.Country, ",") + "/" + strings.Join(name.Organization, ",") + "/" + strings.Join(name.OrganizationalUnit, ",") + "/" + name.CommonName } +func generatePEMCertWithRepeatSAN(currentTime int64, count int, san string) string { + cert := Certificate{ + NotBefore: time.Unix(currentTime, 0), + NotAfter: time.Unix(currentTime, 0), + } + if ip := net.ParseIP(san); ip != nil { + cert.IPAddresses = slices.Repeat([]net.IP{ip}, count) + } else { + cert.DNSNames = slices.Repeat([]string{san}, count) + } + privKey, err := rsa.GenerateKey(rand.Reader, 4096) + if err != nil { + log.Fatal(err) + } + certBytes, err := CreateCertificate(rand.Reader, &cert, &cert, &privKey.PublicKey, privKey) + if err != nil { + log.Fatal(err) + } + return string(pem.EncodeToMemory(&pem.Block{ + Type: "CERTIFICATE", + Bytes: certBytes, + })) +} + const gtsIntermediate = `-----BEGIN CERTIFICATE----- MIIFljCCA36gAwIBAgINAgO8U1lrNMcY9QFQZjANBgkqhkiG9w0BAQsFADBHMQsw CQYDVQQGEwJVUzEiMCAGA1UEChMZR29vZ2xlIFRydXN0IFNlcnZpY2VzIExMQzEU @@ -1351,7 +1399,7 @@ var nameConstraintTests = []struct { func TestNameConstraints(t *testing.T) { for i, test := range nameConstraintTests { - result, err := matchDomainConstraint(test.domain, test.constraint) + result, err := matchDomainConstraint(test.domain, test.constraint, false, map[string][]string{}, map[string][]string{}) if err != nil && !test.expectError { t.Errorf("unexpected error for test #%d: domain=%s, constraint=%s, err=%s", i, test.domain, test.constraint, err) @@ -3048,3 +3096,129 @@ func TestInvalidPolicyWithAnyKeyUsage(t *testing.T) { t.Fatalf("unexpected error, got %q, want %q", err, expectedErr) } } + +func TestCertificateChainSignedByECDSA(t *testing.T) { + caKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + if err != nil { + t.Fatal(err) + } + root := &Certificate{ + SerialNumber: big.NewInt(1), + Subject: pkix.Name{CommonName: "X"}, + NotBefore: time.Now().Add(-time.Hour), + NotAfter: time.Now().Add(365 * 24 * 
time.Hour), + IsCA: true, + KeyUsage: KeyUsageCertSign | KeyUsageCRLSign, + BasicConstraintsValid: true, + } + caDER, err := CreateCertificate(rand.Reader, root, root, &caKey.PublicKey, caKey) + if err != nil { + t.Fatal(err) + } + root, err = ParseCertificate(caDER) + if err != nil { + t.Fatal(err) + } + + leafKey, _ := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + leaf := &Certificate{ + SerialNumber: big.NewInt(42), + Subject: pkix.Name{CommonName: "leaf"}, + NotBefore: time.Now().Add(-10 * time.Minute), + NotAfter: time.Now().Add(24 * time.Hour), + KeyUsage: KeyUsageDigitalSignature, + ExtKeyUsage: []ExtKeyUsage{ExtKeyUsageServerAuth}, + BasicConstraintsValid: true, + } + leafDER, err := CreateCertificate(rand.Reader, leaf, root, &leafKey.PublicKey, caKey) + if err != nil { + t.Fatal(err) + } + leaf, err = ParseCertificate(leafDER) + if err != nil { + t.Fatal(err) + } + + inter, err := ParseCertificate(dsaSelfSignedCNX(t)) + if err != nil { + t.Fatal(err) + } + + inters := NewCertPool() + inters.AddCert(root) + inters.AddCert(inter) + + wantErr := "certificate signed by unknown authority" + _, err = leaf.Verify(VerifyOptions{Intermediates: inters, Roots: NewCertPool()}) + if !strings.Contains(err.Error(), wantErr) { + t.Errorf("got %v, want %q", err, wantErr) + } +} + +// dsaSelfSignedCNX produces DER-encoded +// certificate with the properties: +// +// Subject=Issuer=CN=X +// DSA SPKI +// Matching inner/outer signature OIDs +// Dummy ECDSA signature +func dsaSelfSignedCNX(t *testing.T) []byte { + t.Helper() + var params dsa.Parameters + if err := dsa.GenerateParameters(¶ms, rand.Reader, dsa.L1024N160); err != nil { + t.Fatal(err) + } + + var dsaPriv dsa.PrivateKey + dsaPriv.Parameters = params + if err := dsa.GenerateKey(&dsaPriv, rand.Reader); err != nil { + t.Fatal(err) + } + dsaPub := &dsaPriv.PublicKey + + type dsaParams struct{ P, Q, G *big.Int } + paramDER, err := asn1.Marshal(dsaParams{dsaPub.P, dsaPub.Q, dsaPub.G}) + if err != nil { + t.Fatal(err) + } + yDER, err := asn1.Marshal(dsaPub.Y) + if err != nil { + t.Fatal(err) + } + + spki := publicKeyInfo{ + Algorithm: pkix.AlgorithmIdentifier{ + Algorithm: oidPublicKeyDSA, + Parameters: asn1.RawValue{FullBytes: paramDER}, + }, + PublicKey: asn1.BitString{Bytes: yDER, BitLength: 8 * len(yDER)}, + } + + rdn := pkix.Name{CommonName: "X"}.ToRDNSequence() + b, err := asn1.Marshal(rdn) + if err != nil { + t.Fatal(err) + } + rawName := asn1.RawValue{FullBytes: b} + + algoIdent := pkix.AlgorithmIdentifier{Algorithm: oidSignatureDSAWithSHA256} + tbs := tbsCertificate{ + Version: 0, + SerialNumber: big.NewInt(1002), + SignatureAlgorithm: algoIdent, + Issuer: rawName, + Validity: validity{NotBefore: time.Now().Add(-time.Hour), NotAfter: time.Now().Add(24 * time.Hour)}, + Subject: rawName, + PublicKey: spki, + } + c := certificate{ + TBSCertificate: tbs, + SignatureAlgorithm: algoIdent, + SignatureValue: asn1.BitString{Bytes: []byte{0}, BitLength: 8}, + } + dsaDER, err := asn1.Marshal(c) + if err != nil { + t.Fatal(err) + } + return dsaDER +} diff --git a/src/database/sql/convert.go b/src/database/sql/convert.go index 65fdfe6fa8c3ad..26b139ababd178 100644 --- a/src/database/sql/convert.go +++ b/src/database/sql/convert.go @@ -335,7 +335,6 @@ func convertAssignRows(dest, src any, rows *Rows) error { if rows == nil { return errors.New("invalid context to convert cursor rows, missing parent *Rows") } - rows.closemu.Lock() *d = Rows{ dc: rows.dc, releaseConn: func(error) {}, @@ -351,7 +350,6 @@ func convertAssignRows(dest, src any, rows 
*Rows) error { parentCancel() } } - rows.closemu.Unlock() return nil } } diff --git a/src/database/sql/fakedb_test.go b/src/database/sql/fakedb_test.go index 3dfcd447b52bca..003e6c62986f31 100644 --- a/src/database/sql/fakedb_test.go +++ b/src/database/sql/fakedb_test.go @@ -5,6 +5,7 @@ package sql import ( + "bytes" "context" "database/sql/driver" "errors" @@ -15,7 +16,6 @@ import ( "strconv" "strings" "sync" - "sync/atomic" "testing" "time" ) @@ -91,8 +91,6 @@ func (cc *fakeDriverCtx) OpenConnector(name string) (driver.Connector, error) { type fakeDB struct { name string - useRawBytes atomic.Bool - mu sync.Mutex tables map[string]*table badConn bool @@ -684,8 +682,6 @@ func (c *fakeConn) PrepareContext(ctx context.Context, query string) (driver.Stm switch cmd { case "WIPE": // Nothing - case "USE_RAWBYTES": - c.db.useRawBytes.Store(true) case "SELECT": stmt, err = c.prepareSelect(stmt, parts) case "CREATE": @@ -789,9 +785,6 @@ func (s *fakeStmt) ExecContext(ctx context.Context, args []driver.NamedValue) (d case "WIPE": db.wipe() return driver.ResultNoRows, nil - case "USE_RAWBYTES": - s.c.db.useRawBytes.Store(true) - return driver.ResultNoRows, nil case "CREATE": if err := db.createTable(s.table, s.colName, s.colType); err != nil { return nil, err @@ -1076,10 +1069,9 @@ type rowsCursor struct { errPos int err error - // a clone of slices to give out to clients, indexed by the - // original slice's first byte address. we clone them - // just so we're able to corrupt them on close. - bytesClone map[*byte][]byte + // Data returned to clients. + // We clone and stash it here so it can be invalidated by Close and Next. + driverOwnedMemory [][]byte // Every operation writes to line to enable the race detector // check for data races. @@ -1096,9 +1088,19 @@ func (rc *rowsCursor) touchMem() { rc.line++ } +func (rc *rowsCursor) invalidateDriverOwnedMemory() { + for _, buf := range rc.driverOwnedMemory { + for i := range buf { + buf[i] = 'x' + } + } + rc.driverOwnedMemory = nil +} + func (rc *rowsCursor) Close() error { rc.touchMem() rc.parentMem.touchMem() + rc.invalidateDriverOwnedMemory() rc.closed = true return rc.closeErr } @@ -1129,6 +1131,8 @@ func (rc *rowsCursor) Next(dest []driver.Value) error { if rc.posRow >= len(rc.rows[rc.posSet]) { return io.EOF // per interface spec } + // Corrupt any previously returned bytes. + rc.invalidateDriverOwnedMemory() for i, v := range rc.rows[rc.posSet][rc.posRow].cols { // TODO(bradfitz): convert to subset types? naah, I // think the subset types should only be input to @@ -1136,20 +1140,13 @@ func (rc *rowsCursor) Next(dest []driver.Value) error { // a wider range of types coming out of drivers. all // for ease of drivers, and to prevent drivers from // messing up conversions or doing them differently. - dest[i] = v - - if bs, ok := v.([]byte); ok && !rc.db.useRawBytes.Load() { - if rc.bytesClone == nil { - rc.bytesClone = make(map[*byte][]byte) - } - clone, ok := rc.bytesClone[&bs[0]] - if !ok { - clone = make([]byte, len(bs)) - copy(clone, bs) - rc.bytesClone[&bs[0]] = clone - } - dest[i] = clone + if bs, ok := v.([]byte); ok { + // Clone []bytes and stash for later invalidation. 
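+			// The clone, not the driver's original buffer, is handed to
+			// database/sql; Close and Next overwrite the stashed clones with 'x'
+			// so a test that keeps using a RawBytes after the row is invalid
+			// sees corrupted data instead of silently passing.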
+ bs = bytes.Clone(bs) + rc.driverOwnedMemory = append(rc.driverOwnedMemory, bs) + v = bs } + dest[i] = v } return nil } diff --git a/src/database/sql/sql.go b/src/database/sql/sql.go index b0abcf7fcd408b..4be450ca876877 100644 --- a/src/database/sql/sql.go +++ b/src/database/sql/sql.go @@ -3368,38 +3368,36 @@ func (rs *Rows) Scan(dest ...any) error { // without calling Next. return fmt.Errorf("sql: Scan called without calling Next (closemuScanHold)") } + rs.closemu.RLock() + rs.raw = rs.raw[:0] + err := rs.scanLocked(dest...) + if err == nil && scanArgsContainRawBytes(dest) { + rs.closemuScanHold = true + } else { + rs.closemu.RUnlock() + } + return err +} +func (rs *Rows) scanLocked(dest ...any) error { if rs.lasterr != nil && rs.lasterr != io.EOF { - rs.closemu.RUnlock() return rs.lasterr } if rs.closed { - err := rs.lasterrOrErrLocked(errRowsClosed) - rs.closemu.RUnlock() - return err - } - - if scanArgsContainRawBytes(dest) { - rs.closemuScanHold = true - rs.raw = rs.raw[:0] - } else { - rs.closemu.RUnlock() + return rs.lasterrOrErrLocked(errRowsClosed) } if rs.lastcols == nil { - rs.closemuRUnlockIfHeldByScan() return errors.New("sql: Scan called without calling Next") } if len(dest) != len(rs.lastcols) { - rs.closemuRUnlockIfHeldByScan() return fmt.Errorf("sql: expected %d destination arguments in Scan, not %d", len(rs.lastcols), len(dest)) } for i, sv := range rs.lastcols { err := convertAssignRows(dest[i], sv, rs) if err != nil { - rs.closemuRUnlockIfHeldByScan() return fmt.Errorf(`sql: Scan error on column index %d, name %q: %w`, i, rs.rowsi.Columns()[i], err) } } diff --git a/src/database/sql/sql_test.go b/src/database/sql/sql_test.go index 74b9bf550249c7..4750edd4717561 100644 --- a/src/database/sql/sql_test.go +++ b/src/database/sql/sql_test.go @@ -5,6 +5,7 @@ package sql import ( + "bytes" "context" "database/sql/driver" "errors" @@ -4434,10 +4435,6 @@ func testContextCancelDuringRawBytesScan(t *testing.T, mode string) { db := newTestDB(t, "people") defer closeDB(t, db) - if _, err := db.Exec("USE_RAWBYTES"); err != nil { - t.Fatal(err) - } - // cancel used to call close asynchronously. // This test checks that it waits so as not to interfere with RawBytes. ctx, cancel := context.WithCancel(context.Background()) @@ -4529,6 +4526,61 @@ func TestContextCancelBetweenNextAndErr(t *testing.T) { } } +type testScanner struct { + scanf func(src any) error +} + +func (ts testScanner) Scan(src any) error { return ts.scanf(src) } + +func TestContextCancelDuringScan(t *testing.T) { + db := newTestDB(t, "people") + defer closeDB(t, db) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + scanStart := make(chan any) + scanEnd := make(chan error) + scanner := &testScanner{ + scanf: func(src any) error { + scanStart <- src + return <-scanEnd + }, + } + + // Start a query, and pause it mid-scan. + want := []byte("Alice") + r, err := db.QueryContext(ctx, "SELECT|people|name|name=?", string(want)) + if err != nil { + t.Fatal(err) + } + if !r.Next() { + t.Fatalf("r.Next() = false, want true") + } + go func() { + r.Scan(scanner) + }() + got := <-scanStart + defer close(scanEnd) + gotBytes, ok := got.([]byte) + if !ok { + t.Fatalf("r.Scan returned %T, want []byte", got) + } + if !bytes.Equal(gotBytes, want) { + t.Fatalf("before cancel: r.Scan returned %q, want %q", gotBytes, want) + } + + // Cancel the query. + // Sleep to give it a chance to finish canceling. + cancel() + time.Sleep(10 * time.Millisecond) + + // Cancelling the query should not have changed the result. 
+ if !bytes.Equal(gotBytes, want) { + t.Fatalf("after cancel: r.Scan result is now %q, want %q", gotBytes, want) + } +} + func TestNilErrorAfterClose(t *testing.T) { db := newTestDB(t, "people") defer closeDB(t, db) @@ -4562,10 +4614,6 @@ func TestRawBytesReuse(t *testing.T) { db := newTestDB(t, "people") defer closeDB(t, db) - if _, err := db.Exec("USE_RAWBYTES"); err != nil { - t.Fatal(err) - } - var raw RawBytes // The RawBytes in this query aliases driver-owned memory. diff --git a/src/debug/pe/symbol.go b/src/debug/pe/symbol.go index 6e8d9d16c22540..80acebe9f1f40b 100644 --- a/src/debug/pe/symbol.go +++ b/src/debug/pe/symbol.go @@ -98,7 +98,12 @@ func readCOFFSymbols(fh *FileHeader, r io.ReadSeeker) ([]COFFSymbol, error) { // isSymNameOffset checks symbol name if it is encoded as offset into string table. func isSymNameOffset(name [8]byte) (bool, uint32) { if name[0] == 0 && name[1] == 0 && name[2] == 0 && name[3] == 0 { - return true, binary.LittleEndian.Uint32(name[4:]) + offset := binary.LittleEndian.Uint32(name[4:]) + if offset == 0 { + // symbol has no name + return false, 0 + } + return true, offset } return false, 0 } diff --git a/src/encoding/asn1/asn1.go b/src/encoding/asn1/asn1.go index 0b64f06d368b53..f4be515b98ef1c 100644 --- a/src/encoding/asn1/asn1.go +++ b/src/encoding/asn1/asn1.go @@ -22,6 +22,7 @@ package asn1 import ( "errors" "fmt" + "internal/saferio" "math" "math/big" "reflect" @@ -666,10 +667,17 @@ func parseSequenceOf(bytes []byte, sliceType reflect.Type, elemType reflect.Type offset += t.length numElements++ } - ret = reflect.MakeSlice(sliceType, numElements, numElements) + elemSize := uint64(elemType.Size()) + safeCap := saferio.SliceCapWithSize(elemSize, uint64(numElements)) + if safeCap < 0 { + err = SyntaxError{fmt.Sprintf("%s slice too big: %d elements of %d bytes", elemType.Kind(), numElements, elemSize)} + return + } + ret = reflect.MakeSlice(sliceType, 0, safeCap) params := fieldParameters{} offset := 0 for i := 0; i < numElements; i++ { + ret = reflect.Append(ret, reflect.Zero(elemType)) offset, err = parseField(ret.Index(i), bytes, offset, params) if err != nil { return diff --git a/src/encoding/asn1/asn1_test.go b/src/encoding/asn1/asn1_test.go index 0597740bd5e5ec..41cc0ba50ec304 100644 --- a/src/encoding/asn1/asn1_test.go +++ b/src/encoding/asn1/asn1_test.go @@ -7,10 +7,12 @@ package asn1 import ( "bytes" "encoding/hex" + "errors" "fmt" "math" "math/big" "reflect" + "runtime" "strings" "testing" "time" @@ -1216,3 +1218,39 @@ func TestImplicitTypeRoundtrip(t *testing.T) { t.Fatalf("Unexpected diff after roundtripping struct\na: %#v\nb: %#v", a, b) } } + +func TestParsingMemoryConsumption(t *testing.T) { + // Craft a syntatically valid, but empty, ~10 MB DER bomb. A successful + // unmarshal of this bomb should yield ~280 MB. However, the parsing should + // fail due to the empty content; and, in such cases, we want to make sure + // that we do not unnecessarily allocate memories. + derBomb := make([]byte, 10_000_000) + for i := range derBomb { + derBomb[i] = 0x30 + } + derBomb = append([]byte{0x30, 0x83, 0x98, 0x96, 0x80}, derBomb...) 
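+	// The prepended bytes form the outer DER header: 0x30 is the SEQUENCE tag,
+	// 0x83 says the length occupies the next three octets, and 0x98 0x96 0x80
+	// encodes 10,000,000, the size of the all-0x30 body built above.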
+ + var m runtime.MemStats + runtime.GC() + runtime.ReadMemStats(&m) + memBefore := m.TotalAlloc + + var out []struct { + Id []int + Critical bool `asn1:"optional"` + Value []byte + } + _, err := Unmarshal(derBomb, &out) + if !errors.As(err, &SyntaxError{}) { + t.Fatalf("Incorrect error result: want (%v), but got (%v) instead", &SyntaxError{}, err) + } + + runtime.ReadMemStats(&m) + memDiff := m.TotalAlloc - memBefore + + // Ensure that the memory allocated does not exceed 10<<21 (~20 MB) when + // the parsing fails. + if memDiff > 10<<21 { + t.Errorf("Too much memory allocated while parsing DER: %v MiB", memDiff/1024/1024) + } +} diff --git a/src/encoding/pem/pem.go b/src/encoding/pem/pem.go index dcc7416ee21ffe..6bf2b41ad0eb7f 100644 --- a/src/encoding/pem/pem.go +++ b/src/encoding/pem/pem.go @@ -37,7 +37,7 @@ type Block struct { // line bytes. The remainder of the byte array (also not including the new line // bytes) is also returned and this will always be smaller than the original // argument. -func getLine(data []byte) (line, rest []byte) { +func getLine(data []byte) (line, rest []byte, consumed int) { i := bytes.IndexByte(data, '\n') var j int if i < 0 { @@ -49,7 +49,7 @@ func getLine(data []byte) (line, rest []byte) { i-- } } - return bytes.TrimRight(data[0:i], " \t"), data[j:] + return bytes.TrimRight(data[0:i], " \t"), data[j:], j } // removeSpacesAndTabs returns a copy of its input with all spaces and tabs @@ -90,17 +90,37 @@ func Decode(data []byte) (p *Block, rest []byte) { // pemStart begins with a newline. However, at the very beginning of // the byte array, we'll accept the start string without it. rest = data + + endTrailerIndex := 0 for { - if bytes.HasPrefix(rest, pemStart[1:]) { - rest = rest[len(pemStart)-1:] - } else if _, after, ok := bytes.Cut(rest, pemStart); ok { - rest = after - } else { + // If we've already tried parsing a block, skip past the END we already + // saw. + if endTrailerIndex < 0 || endTrailerIndex > len(rest) { + return nil, data + } + rest = rest[endTrailerIndex:] + + // Find the first END line, and then find the last BEGIN line before + // the end line. This lets us skip any repeated BEGIN lines that don't + // have a matching END. + endIndex := bytes.Index(rest, pemEnd) + if endIndex < 0 { return nil, data } + endTrailerIndex = endIndex + len(pemEnd) + beginIndex := bytes.LastIndex(rest[:endIndex], pemStart[1:]) + if beginIndex < 0 || (beginIndex > 0 && rest[beginIndex-1] != '\n') { + continue + } + rest = rest[beginIndex+len(pemStart)-1:] + endIndex -= beginIndex + len(pemStart) - 1 + endTrailerIndex -= beginIndex + len(pemStart) - 1 var typeLine []byte - typeLine, rest = getLine(rest) + var consumed int + typeLine, rest, consumed = getLine(rest) + endIndex -= consumed + endTrailerIndex -= consumed if !bytes.HasSuffix(typeLine, pemEndOfLine) { continue } @@ -117,7 +137,7 @@ func Decode(data []byte) (p *Block, rest []byte) { if len(rest) == 0 { return nil, data } - line, next := getLine(rest) + line, next, consumed := getLine(rest) key, val, ok := bytes.Cut(line, colon) if !ok { @@ -129,21 +149,13 @@ func Decode(data []byte) (p *Block, rest []byte) { val = bytes.TrimSpace(val) p.Headers[string(key)] = string(val) rest = next + endIndex -= consumed + endTrailerIndex -= consumed } - var endIndex, endTrailerIndex int - - // If there were no headers, the END line might occur - // immediately, without a leading newline. 
- if len(p.Headers) == 0 && bytes.HasPrefix(rest, pemEnd[1:]) { - endIndex = 0 - endTrailerIndex = len(pemEnd) - 1 - } else { - endIndex = bytes.Index(rest, pemEnd) - endTrailerIndex = endIndex + len(pemEnd) - } - - if endIndex < 0 { + // If there were headers, there must be a newline between the headers + // and the END line, so endIndex should be >= 0. + if len(p.Headers) > 0 && endIndex < 0 { continue } @@ -163,21 +175,24 @@ func Decode(data []byte) (p *Block, rest []byte) { } // The line must end with only whitespace. - if s, _ := getLine(restOfEndLine); len(s) != 0 { + if s, _, _ := getLine(restOfEndLine); len(s) != 0 { continue } - base64Data := removeSpacesAndTabs(rest[:endIndex]) - p.Bytes = make([]byte, base64.StdEncoding.DecodedLen(len(base64Data))) - n, err := base64.StdEncoding.Decode(p.Bytes, base64Data) - if err != nil { - continue + p.Bytes = []byte{} + if endIndex > 0 { + base64Data := removeSpacesAndTabs(rest[:endIndex]) + p.Bytes = make([]byte, base64.StdEncoding.DecodedLen(len(base64Data))) + n, err := base64.StdEncoding.Decode(p.Bytes, base64Data) + if err != nil { + continue + } + p.Bytes = p.Bytes[:n] } - p.Bytes = p.Bytes[:n] // the -1 is because we might have only matched pemEnd without the // leading newline if the PEM block was empty. - _, rest = getLine(rest[endIndex+len(pemEnd)-1:]) + _, rest, _ = getLine(rest[endIndex+len(pemEnd)-1:]) return p, rest } } diff --git a/src/encoding/pem/pem_test.go b/src/encoding/pem/pem_test.go index e252ffd8ed1613..fa6e8ba62bdb87 100644 --- a/src/encoding/pem/pem_test.go +++ b/src/encoding/pem/pem_test.go @@ -34,7 +34,7 @@ var getLineTests = []GetLineTest{ func TestGetLine(t *testing.T) { for i, test := range getLineTests { - x, y := getLine([]byte(test.in)) + x, y, _ := getLine([]byte(test.in)) if string(x) != test.out1 || string(y) != test.out2 { t.Errorf("#%d got:%+v,%+v want:%s,%s", i, x, y, test.out1, test.out2) } @@ -46,6 +46,7 @@ func TestDecode(t *testing.T) { if !reflect.DeepEqual(result, certificate) { t.Errorf("#0 got:%#v want:%#v", result, certificate) } + result, remainder = Decode(remainder) if !reflect.DeepEqual(result, privateKey) { t.Errorf("#1 got:%#v want:%#v", result, privateKey) @@ -68,7 +69,7 @@ func TestDecode(t *testing.T) { } result, remainder = Decode(remainder) - if result == nil || result.Type != "HEADERS" || len(result.Headers) != 1 { + if result == nil || result.Type != "VALID HEADERS" || len(result.Headers) != 1 { t.Errorf("#5 expected single header block but got :%v", result) } @@ -381,15 +382,15 @@ ZWAaUoVtWIQ52aKS0p19G99hhb+IVANC4akkdHV4SP8i7MVNZhfUmg== # This shouldn't be recognised because of the missing newline after the headers. ------BEGIN HEADERS----- +-----BEGIN INVALID HEADERS----- Header: 1 ------END HEADERS----- +-----END INVALID HEADERS----- # This should be valid, however. 
------BEGIN HEADERS----- +-----BEGIN VALID HEADERS----- Header: 1 ------END HEADERS-----`) +-----END VALID HEADERS-----`) var certificate = &Block{Type: "CERTIFICATE", Headers: map[string]string{}, @@ -638,3 +639,104 @@ func TestBadEncode(t *testing.T) { } func testingKey(s string) string { return strings.ReplaceAll(s, "TESTING KEY", "PRIVATE KEY") } + +func TestDecodeStrangeCases(t *testing.T) { + sentinelType := "TEST BLOCK" + sentinelBytes := []byte("hello") + for _, tc := range []struct { + name string + pem string + }{ + { + name: "invalid section (not base64)", + pem: `-----BEGIN COMMENT----- +foo foo foo +-----END COMMENT----- +-----BEGIN TEST BLOCK----- +aGVsbG8= +-----END TEST BLOCK-----`, + }, + { + name: "leading garbage on block", + pem: `foo foo foo-----BEGIN CERTIFICATE----- +MCowBQYDK2VwAyEApVjJeLW5MoP6uR3+OeITokM+rBDng6dgl1vvhcy+wws= +-----END PUBLIC KEY----- +-----BEGIN TEST BLOCK----- +aGVsbG8= +-----END TEST BLOCK-----`, + }, + { + name: "leading garbage", + pem: `foo foo foo +-----BEGIN TEST BLOCK----- +aGVsbG8= +-----END TEST BLOCK-----`, + }, + { + name: "leading partial block", + pem: `foo foo foo +-----END COMMENT----- +-----BEGIN TEST BLOCK----- +aGVsbG8= +-----END TEST BLOCK-----`, + }, + { + name: "multiple BEGIN", + pem: `-----BEGIN TEST BLOCK----- +-----BEGIN TEST BLOCK----- +-----BEGIN TEST BLOCK----- +aGVsbG8= +-----END TEST BLOCK-----`, + }, + { + name: "multiple END", + pem: `-----BEGIN TEST BLOCK----- +aGVsbG8= +-----END TEST BLOCK----- +-----END TEST BLOCK----- +-----END TEST BLOCK-----`, + }, + { + name: "leading malformed BEGIN", + pem: `-----BEGIN PUBLIC KEY +aGVsbG8= +-----END PUBLIC KEY----- +-----BEGIN TEST BLOCK----- +aGVsbG8= +-----END TEST BLOCK-----`, + }, + } { + t.Run(tc.name, func(t *testing.T) { + block, _ := Decode([]byte(tc.pem)) + if block == nil { + t.Fatal("expected valid block") + } + if block.Type != sentinelType { + t.Fatalf("unexpected block returned, got type %q, want type %q", block.Type, sentinelType) + } + if !bytes.Equal(block.Bytes, sentinelBytes) { + t.Fatalf("unexpected block content, got %x, want %x", block.Bytes, sentinelBytes) + } + }) + } +} + +func TestJustEnd(t *testing.T) { + pemData := ` +-----END PUBLIC KEY-----` + + block, _ := Decode([]byte(pemData)) + if block != nil { + t.Fatal("unexpected block") + } +} + +func FuzzDecode(f *testing.F) { + f.Fuzz(func(t *testing.T, data []byte) { + Decode(data) + }) +} + +func TestMissingEndTrailer(t *testing.T) { + Decode([]byte{0x2d, 0x2d, 0x2d, 0x2d, 0x2d, 0x42, 0x45, 0x47, 0x49, 0x4e, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0xa, 0x2d, 0x2d, 0x2d, 0x2d, 0x2d, 0x45, 0x4e, 0x44, 0x20}) +} diff --git a/src/go/build/deps_test.go b/src/go/build/deps_test.go index 6d92542e31b652..641d1a325a5c07 100644 --- a/src/go/build/deps_test.go +++ b/src/go/build/deps_test.go @@ -235,7 +235,6 @@ var depsRules = ` internal/types/errors, mime/quotedprintable, net/internal/socktest, - net/url, runtime/trace, text/scanner, text/tabwriter; @@ -298,6 +297,12 @@ var depsRules = ` FMT < text/template/parse; + internal/bytealg, 
internal/itoa, math/bits, slices, strconv, unique + < net/netip; + + FMT, net/netip + < net/url; + net/url, text/template/parse < text/template < internal/lazytemplate; @@ -412,9 +417,6 @@ var depsRules = ` < golang.org/x/net/dns/dnsmessage, golang.org/x/net/lif; - internal/bytealg, internal/itoa, math/bits, slices, strconv, unique - < net/netip; - os, net/netip < internal/routebsd; @@ -557,7 +559,7 @@ var depsRules = ` # CRYPTO-MATH is crypto that exposes math/big APIs - no cgo, net; fmt now ok. - CRYPTO, FMT, math/big + CRYPTO, FMT, math/big, internal/saferio < crypto/internal/boring/bbig < crypto/internal/fips140cache < crypto/rand diff --git a/src/internal/buildcfg/cfg.go b/src/internal/buildcfg/cfg.go index 5ae4c0c7adc8fe..50b122fdb8524e 100644 --- a/src/internal/buildcfg/cfg.go +++ b/src/internal/buildcfg/cfg.go @@ -85,7 +85,7 @@ func gofips140() string { } // isFIPSVersion reports whether v is a valid FIPS version, -// of the form vX.Y.Z. +// of the form vX.Y.Z or vX.Y.Z-hash. func isFIPSVersion(v string) bool { if !strings.HasPrefix(v, "v") { return false @@ -99,7 +99,8 @@ func isFIPSVersion(v string) bool { return false } v, ok = skipNum(v[len("."):]) - return ok && v == "" + hasHash := strings.HasPrefix(v, "-") && len(v) == len("-")+8 + return ok && (v == "" || hasHash) } // skipNum skips the leading text matching [0-9]+ diff --git a/src/internal/godebugs/table.go b/src/internal/godebugs/table.go index 2d008825459bb2..852305e8553aab 100644 --- a/src/internal/godebugs/table.go +++ b/src/internal/godebugs/table.go @@ -42,6 +42,7 @@ var All = []Info{ {Name: "http2client", Package: "net/http"}, {Name: "http2debug", Package: "net/http", Opaque: true}, {Name: "http2server", Package: "net/http"}, + {Name: "httpcookiemaxnum", Package: "net/http", Changed: 24, Old: "0"}, {Name: "httplaxcontentlength", Package: "net/http", Changed: 22, Old: "1"}, {Name: "httpmuxgo121", Package: "net/http", Changed: 22, Old: "1"}, {Name: "httpservecontentkeepheaders", Package: "net/http", Changed: 23, Old: "1"}, diff --git a/src/internal/poll/fd_windows.go b/src/internal/poll/fd_windows.go index acc2ab0c6e1eaf..dd833afd24b0a0 100644 --- a/src/internal/poll/fd_windows.go +++ b/src/internal/poll/fd_windows.go @@ -636,12 +636,22 @@ func (fd *FD) Pread(b []byte, off int64) (int, error) { fd.l.Lock() defer fd.l.Unlock() - curoffset, err := syscall.Seek(fd.Sysfd, 0, io.SeekCurrent) - if err != nil { - return 0, err + if fd.isBlocking { + curoffset, err := syscall.Seek(fd.Sysfd, 0, io.SeekCurrent) + if err != nil { + return 0, err + } + defer syscall.Seek(fd.Sysfd, curoffset, io.SeekStart) + defer fd.setOffset(curoffset) + } else { + // Overlapped handles don't have the file pointer updated + // when performing I/O operations, so there is no need to + // call Seek to reset the file pointer. + // Also, some overlapped file handles don't support seeking. + // See https://go.dev/issues/74951. 
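+		// Instead, save the offset tracked in fd.offset and restore it once the
+		// read completes, so a positional read leaves the logical file position
+		// unchanged.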
+ curoffset := fd.offset + defer fd.setOffset(curoffset) } - defer syscall.Seek(fd.Sysfd, curoffset, io.SeekStart) - defer fd.setOffset(curoffset) o := &fd.rop o.InitBuf(b) fd.setOffset(off) @@ -852,12 +862,22 @@ func (fd *FD) Pwrite(buf []byte, off int64) (int, error) { fd.l.Lock() defer fd.l.Unlock() - curoffset, err := syscall.Seek(fd.Sysfd, 0, io.SeekCurrent) - if err != nil { - return 0, err + if fd.isBlocking { + curoffset, err := syscall.Seek(fd.Sysfd, 0, io.SeekCurrent) + if err != nil { + return 0, err + } + defer syscall.Seek(fd.Sysfd, curoffset, io.SeekStart) + defer fd.setOffset(curoffset) + } else { + // Overlapped handles don't have the file pointer updated + // when performing I/O operations, so there is no need to + // call Seek to reset the file pointer. + // Also, some overlapped file handles don't support seeking. + // See https://go.dev/issues/74951. + curoffset := fd.offset + defer fd.setOffset(curoffset) } - defer syscall.Seek(fd.Sysfd, curoffset, io.SeekStart) - defer fd.setOffset(curoffset) var ntotal int for { @@ -1106,6 +1126,12 @@ func (fd *FD) Seek(offset int64, whence int) (int64, error) { fd.l.Lock() defer fd.l.Unlock() + if !fd.isBlocking && whence == io.SeekCurrent { + // Windows doesn't keep the file pointer for overlapped file handles. + // We do it ourselves in case to account for any read or write + // operations that may have occurred. + offset += fd.offset + } n, err := syscall.Seek(fd.Sysfd, offset, whence) fd.setOffset(n) return n, err diff --git a/src/internal/synctest/synctest_test.go b/src/internal/synctest/synctest_test.go index 6cebf86c31f416..73a0a1c453e3d5 100644 --- a/src/internal/synctest/synctest_test.go +++ b/src/internal/synctest/synctest_test.go @@ -383,57 +383,59 @@ func TestChannelMovedOutOfBubble(t *testing.T) { for _, test := range []struct { desc string f func(chan struct{}) - wantPanic string + wantFatal string }{{ desc: "receive", f: func(ch chan struct{}) { <-ch }, - wantPanic: "receive on synctest channel from outside bubble", + wantFatal: "receive on synctest channel from outside bubble", }, { desc: "send", f: func(ch chan struct{}) { ch <- struct{}{} }, - wantPanic: "send on synctest channel from outside bubble", + wantFatal: "send on synctest channel from outside bubble", }, { desc: "close", f: func(ch chan struct{}) { close(ch) }, - wantPanic: "close of synctest channel from outside bubble", + wantFatal: "close of synctest channel from outside bubble", }} { t.Run(test.desc, func(t *testing.T) { // Bubbled channel accessed from outside any bubble. t.Run("outside_bubble", func(t *testing.T) { - donec := make(chan struct{}) - ch := make(chan chan struct{}) - go func() { - defer close(donec) - defer wantPanic(t, test.wantPanic) - test.f(<-ch) - }() - synctest.Run(func() { - ch <- make(chan struct{}) + wantFatal(t, test.wantFatal, func() { + donec := make(chan struct{}) + ch := make(chan chan struct{}) + go func() { + defer close(donec) + test.f(<-ch) + }() + synctest.Run(func() { + ch <- make(chan struct{}) + }) + <-donec }) - <-donec }) // Bubbled channel accessed from a different bubble. 
t.Run("different_bubble", func(t *testing.T) { - donec := make(chan struct{}) - ch := make(chan chan struct{}) - go func() { - defer close(donec) - c := <-ch + wantFatal(t, test.wantFatal, func() { + donec := make(chan struct{}) + ch := make(chan chan struct{}) + go func() { + defer close(donec) + c := <-ch + synctest.Run(func() { + test.f(c) + }) + }() synctest.Run(func() { - defer wantPanic(t, test.wantPanic) - test.f(c) + ch <- make(chan struct{}) }) - }() - synctest.Run(func() { - ch <- make(chan struct{}) + <-donec }) - <-donec }) }) } @@ -443,39 +445,40 @@ func TestTimerFromInsideBubble(t *testing.T) { for _, test := range []struct { desc string f func(tm *time.Timer) - wantPanic string + wantFatal string }{{ desc: "read channel", f: func(tm *time.Timer) { <-tm.C }, - wantPanic: "receive on synctest channel from outside bubble", + wantFatal: "receive on synctest channel from outside bubble", }, { desc: "Reset", f: func(tm *time.Timer) { tm.Reset(1 * time.Second) }, - wantPanic: "reset of synctest timer from outside bubble", + wantFatal: "reset of synctest timer from outside bubble", }, { desc: "Stop", f: func(tm *time.Timer) { tm.Stop() }, - wantPanic: "stop of synctest timer from outside bubble", + wantFatal: "stop of synctest timer from outside bubble", }} { t.Run(test.desc, func(t *testing.T) { - donec := make(chan struct{}) - ch := make(chan *time.Timer) - go func() { - defer close(donec) - defer wantPanic(t, test.wantPanic) - test.f(<-ch) - }() - synctest.Run(func() { - tm := time.NewTimer(1 * time.Second) - ch <- tm + wantFatal(t, test.wantFatal, func() { + donec := make(chan struct{}) + ch := make(chan *time.Timer) + go func() { + defer close(donec) + test.f(<-ch) + }() + synctest.Run(func() { + tm := time.NewTimer(1 * time.Second) + ch <- tm + }) + <-donec }) - <-donec }) } } @@ -776,6 +779,28 @@ func TestWaitGroupHeapAllocated(t *testing.T) { }) } +// Issue #75134: Many racing bubble associations. +func TestWaitGroupManyBubbles(t *testing.T) { + var wg sync.WaitGroup + for range 100 { + wg.Go(func() { + synctest.Run(func() { + cancelc := make(chan struct{}) + var wg2 sync.WaitGroup + for range 100 { + wg2.Go(func() { + <-cancelc + }) + } + synctest.Wait() + close(cancelc) + wg2.Wait() + }) + }) + } + wg.Wait() +} + func TestHappensBefore(t *testing.T) { // Use two parallel goroutines accessing different vars to ensure that // we correctly account for multiple goroutines in the bubble. diff --git a/src/internal/syscall/windows/at_windows.go b/src/internal/syscall/windows/at_windows.go index d48fce1c99dc36..41cdaf0d2e34ca 100644 --- a/src/internal/syscall/windows/at_windows.go +++ b/src/internal/syscall/windows/at_windows.go @@ -204,7 +204,7 @@ func Deleteat(dirfd syscall.Handle, name string, options uint32) error { var h syscall.Handle err := NtOpenFile( &h, - SYNCHRONIZE|DELETE, + SYNCHRONIZE|FILE_READ_ATTRIBUTES|DELETE, objAttrs, &IO_STATUS_BLOCK{}, FILE_SHARE_DELETE|FILE_SHARE_READ|FILE_SHARE_WRITE, @@ -215,14 +215,22 @@ func Deleteat(dirfd syscall.Handle, name string, options uint32) error { } defer syscall.CloseHandle(h) - const ( - FileDispositionInformation = 13 - FileDispositionInformationEx = 64 - ) + if TestDeleteatFallback { + return deleteatFallback(h) + } + + const FileDispositionInformationEx = 64 // First, attempt to delete the file using POSIX semantics // (which permit a file to be deleted while it is still open). // This matches the behavior of DeleteFileW. 
+ // + // The following call uses features available on different Windows versions: + // - FILE_DISPOSITION_INFORMATION_EX: Windows 10, version 1607 (aka RS1) + // - FILE_DISPOSITION_POSIX_SEMANTICS: Windows 10, version 1607 (aka RS1) + // - FILE_DISPOSITION_IGNORE_READONLY_ATTRIBUTE: Windows 10, version 1809 (aka RS5) + // + // Also, some file systems, like FAT32, don't support POSIX semantics. err = NtSetInformationFile( h, &IO_STATUS_BLOCK{}, @@ -241,28 +249,57 @@ func Deleteat(dirfd syscall.Handle, name string, options uint32) error { switch err { case nil: return nil - case STATUS_CANNOT_DELETE, STATUS_DIRECTORY_NOT_EMPTY: + case STATUS_INVALID_INFO_CLASS, // the operating system doesn't support FileDispositionInformationEx + STATUS_INVALID_PARAMETER, // the operating system doesn't support one of the flags + STATUS_NOT_SUPPORTED: // the file system doesn't support FILE_DISPOSITION_INFORMATION_EX or one of the flags + return deleteatFallback(h) + default: return err.(NTStatus).Errno() } +} - // If the prior deletion failed, the filesystem either doesn't support - // POSIX semantics (for example, FAT), or hasn't implemented - // FILE_DISPOSITION_INFORMATION_EX. - // - // Try again. - err = NtSetInformationFile( +// TestDeleteatFallback should only be used for testing purposes. +// When set, [Deleteat] uses the fallback path unconditionally. +var TestDeleteatFallback bool + +// deleteatFallback is a deleteat implementation that strives +// for compatibility with older Windows versions and file systems +// over performance. +func deleteatFallback(h syscall.Handle) error { + var data syscall.ByHandleFileInformation + if err := syscall.GetFileInformationByHandle(h, &data); err == nil && data.FileAttributes&syscall.FILE_ATTRIBUTE_READONLY != 0 { + // Remove read-only attribute. Reopen the file, as it was previously open without FILE_WRITE_ATTRIBUTES access + // in order to maximize compatibility in the happy path. 
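+		// ReOpenFile gives us a second handle to the same open file with the
+		// requested access right, without needing to know the file's path.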
+ wh, err := ReOpenFile(h, + FILE_WRITE_ATTRIBUTES, + FILE_SHARE_READ|FILE_SHARE_WRITE|FILE_SHARE_DELETE, + syscall.FILE_FLAG_OPEN_REPARSE_POINT|syscall.FILE_FLAG_BACKUP_SEMANTICS, + ) + if err != nil { + return err + } + err = SetFileInformationByHandle( + wh, + FileBasicInfo, + unsafe.Pointer(&FILE_BASIC_INFO{ + FileAttributes: data.FileAttributes &^ FILE_ATTRIBUTE_READONLY, + }), + uint32(unsafe.Sizeof(FILE_BASIC_INFO{})), + ) + syscall.CloseHandle(wh) + if err != nil { + return err + } + } + + return SetFileInformationByHandle( h, - &IO_STATUS_BLOCK{}, - unsafe.Pointer(&FILE_DISPOSITION_INFORMATION{ + FileDispositionInfo, + unsafe.Pointer(&FILE_DISPOSITION_INFO{ DeleteFile: true, }), - uint32(unsafe.Sizeof(FILE_DISPOSITION_INFORMATION{})), - FileDispositionInformation, + uint32(unsafe.Sizeof(FILE_DISPOSITION_INFO{})), ) - if st, ok := err.(NTStatus); ok { - return st.Errno() - } - return err } func Renameat(olddirfd syscall.Handle, oldpath string, newdirfd syscall.Handle, newpath string) error { diff --git a/src/internal/syscall/windows/symlink_windows.go b/src/internal/syscall/windows/symlink_windows.go index b91246037b5efe..b8249b3848ea02 100644 --- a/src/internal/syscall/windows/symlink_windows.go +++ b/src/internal/syscall/windows/symlink_windows.go @@ -19,6 +19,7 @@ const ( FileBasicInfo = 0 // FILE_BASIC_INFO FileStandardInfo = 1 // FILE_STANDARD_INFO FileNameInfo = 2 // FILE_NAME_INFO + FileDispositionInfo = 4 // FILE_DISPOSITION_INFO FileStreamInfo = 7 // FILE_STREAM_INFO FileCompressionInfo = 8 // FILE_COMPRESSION_INFO FileAttributeTagInfo = 9 // FILE_ATTRIBUTE_TAG_INFO diff --git a/src/internal/syscall/windows/syscall_windows.go b/src/internal/syscall/windows/syscall_windows.go index 905cabc81e4798..968dbaf061e4e7 100644 --- a/src/internal/syscall/windows/syscall_windows.go +++ b/src/internal/syscall/windows/syscall_windows.go @@ -529,6 +529,8 @@ const ( //sys GetOverlappedResult(handle syscall.Handle, overlapped *syscall.Overlapped, done *uint32, wait bool) (err error) //sys CreateNamedPipe(name *uint16, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *syscall.SecurityAttributes) (handle syscall.Handle, err error) [failretval==syscall.InvalidHandle] = CreateNamedPipeW +//sys ReOpenFile(filehandle syscall.Handle, desiredAccess uint32, shareMode uint32, flagAndAttributes uint32) (handle syscall.Handle, err error) [failretval==syscall.InvalidHandle] + // NTStatus corresponds with NTSTATUS, error values returned by ntdll.dll and // other native functions. 
type NTStatus uint32 @@ -554,6 +556,9 @@ const ( STATUS_NOT_A_DIRECTORY NTStatus = 0xC0000103 STATUS_CANNOT_DELETE NTStatus = 0xC0000121 STATUS_REPARSE_POINT_ENCOUNTERED NTStatus = 0xC000050B + STATUS_NOT_SUPPORTED NTStatus = 0xC00000BB + STATUS_INVALID_PARAMETER NTStatus = 0xC000000D + STATUS_INVALID_INFO_CLASS NTStatus = 0xC0000003 ) const ( diff --git a/src/internal/syscall/windows/types_windows.go b/src/internal/syscall/windows/types_windows.go index 93664b4b7da8ca..6d989e7e7e78bc 100644 --- a/src/internal/syscall/windows/types_windows.go +++ b/src/internal/syscall/windows/types_windows.go @@ -199,6 +199,11 @@ const ( FILE_OPEN_FOR_FREE_SPACE_QUERY = 0x00800000 ) +// https://learn.microsoft.com/en-us/windows/win32/api/winbase/ns-winbase-file_disposition_info +type FILE_DISPOSITION_INFO struct { + DeleteFile bool +} + // https://learn.microsoft.com/en-us/windows-hardware/drivers/ddi/ntddk/ns-ntddk-_file_disposition_information type FILE_DISPOSITION_INFORMATION struct { DeleteFile bool diff --git a/src/internal/syscall/windows/zsyscall_windows.go b/src/internal/syscall/windows/zsyscall_windows.go index 90cf0b92a49bc4..ba6c017ea96458 100644 --- a/src/internal/syscall/windows/zsyscall_windows.go +++ b/src/internal/syscall/windows/zsyscall_windows.go @@ -85,6 +85,7 @@ var ( procModule32NextW = modkernel32.NewProc("Module32NextW") procMoveFileExW = modkernel32.NewProc("MoveFileExW") procMultiByteToWideChar = modkernel32.NewProc("MultiByteToWideChar") + procReOpenFile = modkernel32.NewProc("ReOpenFile") procRtlLookupFunctionEntry = modkernel32.NewProc("RtlLookupFunctionEntry") procRtlVirtualUnwind = modkernel32.NewProc("RtlVirtualUnwind") procSetFileInformationByHandle = modkernel32.NewProc("SetFileInformationByHandle") @@ -431,6 +432,15 @@ func MultiByteToWideChar(codePage uint32, dwFlags uint32, str *byte, nstr int32, return } +func ReOpenFile(filehandle syscall.Handle, desiredAccess uint32, shareMode uint32, flagAndAttributes uint32) (handle syscall.Handle, err error) { + r0, _, e1 := syscall.Syscall6(procReOpenFile.Addr(), 4, uintptr(filehandle), uintptr(desiredAccess), uintptr(shareMode), uintptr(flagAndAttributes), 0, 0) + handle = syscall.Handle(r0) + if handle == syscall.InvalidHandle { + err = errnoErr(e1) + } + return +} + func RtlLookupFunctionEntry(pc uintptr, baseAddress *uintptr, table unsafe.Pointer) (ret *RUNTIME_FUNCTION) { r0, _, _ := syscall.Syscall(procRtlLookupFunctionEntry.Addr(), 3, uintptr(pc), uintptr(unsafe.Pointer(baseAddress)), uintptr(table)) ret = (*RUNTIME_FUNCTION)(unsafe.Pointer(r0)) diff --git a/src/mime/grammar.go b/src/mime/grammar.go index cc578fbcfd4168..1efd8a16dec607 100644 --- a/src/mime/grammar.go +++ b/src/mime/grammar.go @@ -62,7 +62,9 @@ func isTokenChar(c byte) bool { 1<<'^' | 1<<'_' | 1<<'`' | + 1<<'{' | 1<<'|' | + 1<<'}' | 1<<'~' return ((uint64(1)<<c)&(mask&(1<<64-1)) | (uint64(1)<<(c-64))&(mask>>64)) != 0 diff --git a/src/mime/mediatype_test.go b/src/mime/mediatype_test.go index 251df8d6691ab9..da8d64de7a3f0c 100644 --- a/src/mime/mediatype_test.go +++ b/src/mime/mediatype_test.go @@ -413,6 +413,9 @@ func init() { // Issue #48866: duplicate parameters containing equal values should be allowed {`text; charset=utf-8; charset=utf-8; format=fixed`, "text", m("charset", "utf-8", "format", "fixed")}, {`text; charset=utf-8; format=flowed; charset=utf-8`, "text", m("charset", "utf-8", "format", "flowed")}, + + // Issue #76236: '{' and '}' are token chars.
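The grammar change above adds '{' and '}' to the token character set (issue #76236), so parameter values containing braces no longer need to be quoted; the test entry that follows feeds the same input through the media-type test table. A small sketch of the observable effect through the public API:

package main

import (
	"fmt"
	"mime"
)

func main() {
	// With '{' and '}' accepted as token characters, an unquoted brace-filled
	// filename parses cleanly instead of being rejected.
	mediatype, params, err := mime.ParseMediaType("attachment; filename={file}.png")
	if err != nil {
		fmt.Println("parse error:", err)
		return
	}
	fmt.Println(mediatype, params["filename"]) // attachment {file}.png
}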
+ {"attachment; filename={file}.png", "attachment", m("filename", "{file}.png")}, } } diff --git a/src/net/http/cookie.go b/src/net/http/cookie.go index 408fe88452b37a..d3c5c168ef21f1 100644 --- a/src/net/http/cookie.go +++ b/src/net/http/cookie.go @@ -7,6 +7,7 @@ package http import ( "errors" "fmt" + "internal/godebug" "log" "net" "net/http/internal/ascii" @@ -16,6 +17,8 @@ import ( "time" ) +var httpcookiemaxnum = godebug.New("httpcookiemaxnum") + // A Cookie represents an HTTP cookie as sent in the Set-Cookie header of an // HTTP response or the Cookie header of an HTTP request. // @@ -58,16 +61,37 @@ const ( ) var ( - errBlankCookie = errors.New("http: blank cookie") - errEqualNotFoundInCookie = errors.New("http: '=' not found in cookie") - errInvalidCookieName = errors.New("http: invalid cookie name") - errInvalidCookieValue = errors.New("http: invalid cookie value") + errBlankCookie = errors.New("http: blank cookie") + errEqualNotFoundInCookie = errors.New("http: '=' not found in cookie") + errInvalidCookieName = errors.New("http: invalid cookie name") + errInvalidCookieValue = errors.New("http: invalid cookie value") + errCookieNumLimitExceeded = errors.New("http: number of cookies exceeded limit") ) +const defaultCookieMaxNum = 3000 + +func cookieNumWithinMax(cookieNum int) bool { + withinDefaultMax := cookieNum <= defaultCookieMaxNum + if httpcookiemaxnum.Value() == "" { + return withinDefaultMax + } + if customMax, err := strconv.Atoi(httpcookiemaxnum.Value()); err == nil { + withinCustomMax := customMax == 0 || cookieNum <= customMax + if withinDefaultMax != withinCustomMax { + httpcookiemaxnum.IncNonDefault() + } + return withinCustomMax + } + return withinDefaultMax +} + // ParseCookie parses a Cookie header value and returns all the cookies // which were set in it. Since the same cookie name can appear multiple times // the returned Values can contain more than one value for a given key. func ParseCookie(line string) ([]*Cookie, error) { + if !cookieNumWithinMax(strings.Count(line, ";") + 1) { + return nil, errCookieNumLimitExceeded + } parts := strings.Split(textproto.TrimString(line), ";") if len(parts) == 1 && parts[0] == "" { return nil, errBlankCookie @@ -197,11 +221,21 @@ func ParseSetCookie(line string) (*Cookie, error) { // readSetCookies parses all "Set-Cookie" values from // the header h and returns the successfully parsed Cookies. +// +// If the amount of cookies exceeds CookieNumLimit, and httpcookielimitnum +// GODEBUG option is not explicitly turned off, this function will silently +// fail and return an empty slice. func readSetCookies(h Header) []*Cookie { cookieCount := len(h["Set-Cookie"]) if cookieCount == 0 { return []*Cookie{} } + // Cookie limit was unfortunately introduced at a later point in time. + // As such, we can only fail by returning an empty slice rather than + // explicit error. + if !cookieNumWithinMax(cookieCount) { + return []*Cookie{} + } cookies := make([]*Cookie, 0, cookieCount) for _, line := range h["Set-Cookie"] { if cookie, err := ParseSetCookie(line); err == nil { @@ -329,13 +363,28 @@ func (c *Cookie) Valid() error { // readCookies parses all "Cookie" values from the header h and // returns the successfully parsed Cookies. // -// if filter isn't empty, only cookies of that name are returned. +// If filter isn't empty, only cookies of that name are returned. 
+// +// If the amount of cookies exceeds CookieNumLimit, and httpcookielimitnum +// GODEBUG option is not explicitly turned off, this function will silently +// fail and return an empty slice. func readCookies(h Header, filter string) []*Cookie { lines := h["Cookie"] if len(lines) == 0 { return []*Cookie{} } + // Cookie limit was unfortunately introduced at a later point in time. + // As such, we can only fail by returning an empty slice rather than + // explicit error. + cookieCount := 0 + for _, line := range lines { + cookieCount += strings.Count(line, ";") + 1 + } + if !cookieNumWithinMax(cookieCount) { + return []*Cookie{} + } + cookies := make([]*Cookie, 0, len(lines)+strings.Count(lines[0], ";")) for _, line := range lines { line = textproto.TrimString(line) diff --git a/src/net/http/cookie_test.go b/src/net/http/cookie_test.go index aac69563624fcd..d028725f31c4a3 100644 --- a/src/net/http/cookie_test.go +++ b/src/net/http/cookie_test.go @@ -11,6 +11,7 @@ import ( "log" "os" "reflect" + "slices" "strings" "testing" "time" @@ -255,16 +256,17 @@ func TestAddCookie(t *testing.T) { } var readSetCookiesTests = []struct { - Header Header - Cookies []*Cookie + header Header + cookies []*Cookie + godebug string }{ { - Header{"Set-Cookie": {"Cookie-1=v$1"}}, - []*Cookie{{Name: "Cookie-1", Value: "v$1", Raw: "Cookie-1=v$1"}}, + header: Header{"Set-Cookie": {"Cookie-1=v$1"}}, + cookies: []*Cookie{{Name: "Cookie-1", Value: "v$1", Raw: "Cookie-1=v$1"}}, }, { - Header{"Set-Cookie": {"NID=99=YsDT5i3E-CXax-; expires=Wed, 23-Nov-2011 01:05:03 GMT; path=/; domain=.google.ch; HttpOnly"}}, - []*Cookie{{ + header: Header{"Set-Cookie": {"NID=99=YsDT5i3E-CXax-; expires=Wed, 23-Nov-2011 01:05:03 GMT; path=/; domain=.google.ch; HttpOnly"}}, + cookies: []*Cookie{{ Name: "NID", Value: "99=YsDT5i3E-CXax-", Path: "/", @@ -276,8 +278,8 @@ var readSetCookiesTests = []struct { }}, }, { - Header{"Set-Cookie": {".ASPXAUTH=7E3AA; expires=Wed, 07-Mar-2012 14:25:06 GMT; path=/; HttpOnly"}}, - []*Cookie{{ + header: Header{"Set-Cookie": {".ASPXAUTH=7E3AA; expires=Wed, 07-Mar-2012 14:25:06 GMT; path=/; HttpOnly"}}, + cookies: []*Cookie{{ Name: ".ASPXAUTH", Value: "7E3AA", Path: "/", @@ -288,8 +290,8 @@ var readSetCookiesTests = []struct { }}, }, { - Header{"Set-Cookie": {"ASP.NET_SessionId=foo; path=/; HttpOnly"}}, - []*Cookie{{ + header: Header{"Set-Cookie": {"ASP.NET_SessionId=foo; path=/; HttpOnly"}}, + cookies: []*Cookie{{ Name: "ASP.NET_SessionId", Value: "foo", Path: "/", @@ -298,8 +300,8 @@ var readSetCookiesTests = []struct { }}, }, { - Header{"Set-Cookie": {"samesitedefault=foo; SameSite"}}, - []*Cookie{{ + header: Header{"Set-Cookie": {"samesitedefault=foo; SameSite"}}, + cookies: []*Cookie{{ Name: "samesitedefault", Value: "foo", SameSite: SameSiteDefaultMode, @@ -307,8 +309,8 @@ var readSetCookiesTests = []struct { }}, }, { - Header{"Set-Cookie": {"samesiteinvalidisdefault=foo; SameSite=invalid"}}, - []*Cookie{{ + header: Header{"Set-Cookie": {"samesiteinvalidisdefault=foo; SameSite=invalid"}}, + cookies: []*Cookie{{ Name: "samesiteinvalidisdefault", Value: "foo", SameSite: SameSiteDefaultMode, @@ -316,8 +318,8 @@ var readSetCookiesTests = []struct { }}, }, { - Header{"Set-Cookie": {"samesitelax=foo; SameSite=Lax"}}, - []*Cookie{{ + header: Header{"Set-Cookie": {"samesitelax=foo; SameSite=Lax"}}, + cookies: []*Cookie{{ Name: "samesitelax", Value: "foo", SameSite: SameSiteLaxMode, @@ -325,8 +327,8 @@ var readSetCookiesTests = []struct { }}, }, { - Header{"Set-Cookie": {"samesitestrict=foo; SameSite=Strict"}}, - 
[]*Cookie{{ + header: Header{"Set-Cookie": {"samesitestrict=foo; SameSite=Strict"}}, + cookies: []*Cookie{{ Name: "samesitestrict", Value: "foo", SameSite: SameSiteStrictMode, @@ -334,8 +336,8 @@ var readSetCookiesTests = []struct { }}, }, { - Header{"Set-Cookie": {"samesitenone=foo; SameSite=None"}}, - []*Cookie{{ + header: Header{"Set-Cookie": {"samesitenone=foo; SameSite=None"}}, + cookies: []*Cookie{{ Name: "samesitenone", Value: "foo", SameSite: SameSiteNoneMode, @@ -345,47 +347,66 @@ var readSetCookiesTests = []struct { // Make sure we can properly read back the Set-Cookie headers we create // for values containing spaces or commas: { - Header{"Set-Cookie": {`special-1=a z`}}, - []*Cookie{{Name: "special-1", Value: "a z", Raw: `special-1=a z`}}, + header: Header{"Set-Cookie": {`special-1=a z`}}, + cookies: []*Cookie{{Name: "special-1", Value: "a z", Raw: `special-1=a z`}}, }, { - Header{"Set-Cookie": {`special-2=" z"`}}, - []*Cookie{{Name: "special-2", Value: " z", Quoted: true, Raw: `special-2=" z"`}}, + header: Header{"Set-Cookie": {`special-2=" z"`}}, + cookies: []*Cookie{{Name: "special-2", Value: " z", Quoted: true, Raw: `special-2=" z"`}}, }, { - Header{"Set-Cookie": {`special-3="a "`}}, - []*Cookie{{Name: "special-3", Value: "a ", Quoted: true, Raw: `special-3="a "`}}, + header: Header{"Set-Cookie": {`special-3="a "`}}, + cookies: []*Cookie{{Name: "special-3", Value: "a ", Quoted: true, Raw: `special-3="a "`}}, }, { - Header{"Set-Cookie": {`special-4=" "`}}, - []*Cookie{{Name: "special-4", Value: " ", Quoted: true, Raw: `special-4=" "`}}, + header: Header{"Set-Cookie": {`special-4=" "`}}, + cookies: []*Cookie{{Name: "special-4", Value: " ", Quoted: true, Raw: `special-4=" "`}}, }, { - Header{"Set-Cookie": {`special-5=a,z`}}, - []*Cookie{{Name: "special-5", Value: "a,z", Raw: `special-5=a,z`}}, + header: Header{"Set-Cookie": {`special-5=a,z`}}, + cookies: []*Cookie{{Name: "special-5", Value: "a,z", Raw: `special-5=a,z`}}, }, { - Header{"Set-Cookie": {`special-6=",z"`}}, - []*Cookie{{Name: "special-6", Value: ",z", Quoted: true, Raw: `special-6=",z"`}}, + header: Header{"Set-Cookie": {`special-6=",z"`}}, + cookies: []*Cookie{{Name: "special-6", Value: ",z", Quoted: true, Raw: `special-6=",z"`}}, }, { - Header{"Set-Cookie": {`special-7=a,`}}, - []*Cookie{{Name: "special-7", Value: "a,", Raw: `special-7=a,`}}, + header: Header{"Set-Cookie": {`special-7=a,`}}, + cookies: []*Cookie{{Name: "special-7", Value: "a,", Raw: `special-7=a,`}}, }, { - Header{"Set-Cookie": {`special-8=","`}}, - []*Cookie{{Name: "special-8", Value: ",", Quoted: true, Raw: `special-8=","`}}, + header: Header{"Set-Cookie": {`special-8=","`}}, + cookies: []*Cookie{{Name: "special-8", Value: ",", Quoted: true, Raw: `special-8=","`}}, }, // Make sure we can properly read back the Set-Cookie headers // for names containing spaces: { - Header{"Set-Cookie": {`special-9 =","`}}, - []*Cookie{{Name: "special-9", Value: ",", Quoted: true, Raw: `special-9 =","`}}, + header: Header{"Set-Cookie": {`special-9 =","`}}, + cookies: []*Cookie{{Name: "special-9", Value: ",", Quoted: true, Raw: `special-9 =","`}}, }, // Quoted values (issue #46443) { - Header{"Set-Cookie": {`cookie="quoted"`}}, - []*Cookie{{Name: "cookie", Value: "quoted", Quoted: true, Raw: `cookie="quoted"`}}, + header: Header{"Set-Cookie": {`cookie="quoted"`}}, + cookies: []*Cookie{{Name: "cookie", Value: "quoted", Quoted: true, Raw: `cookie="quoted"`}}, + }, + { + header: Header{"Set-Cookie": slices.Repeat([]string{"a="}, defaultCookieMaxNum+1)}, + cookies: 
[]*Cookie{}, + }, + { + header: Header{"Set-Cookie": slices.Repeat([]string{"a="}, 10)}, + cookies: []*Cookie{}, + godebug: "httpcookiemaxnum=5", + }, + { + header: Header{"Set-Cookie": strings.Split(strings.Repeat(";a=", defaultCookieMaxNum+1)[1:], ";")}, + cookies: slices.Repeat([]*Cookie{{Name: "a", Value: "", Quoted: false, Raw: "a="}}, defaultCookieMaxNum+1), + godebug: "httpcookiemaxnum=0", + }, + { + header: Header{"Set-Cookie": strings.Split(strings.Repeat(";a=", defaultCookieMaxNum+1)[1:], ";")}, + cookies: slices.Repeat([]*Cookie{{Name: "a", Value: "", Quoted: false, Raw: "a="}}, defaultCookieMaxNum+1), + godebug: fmt.Sprintf("httpcookiemaxnum=%v", defaultCookieMaxNum+1), }, // TODO(bradfitz): users have reported seeing this in the @@ -405,79 +426,103 @@ func toJSON(v any) string { func TestReadSetCookies(t *testing.T) { for i, tt := range readSetCookiesTests { + t.Setenv("GODEBUG", tt.godebug) for n := 0; n < 2; n++ { // to verify readSetCookies doesn't mutate its input - c := readSetCookies(tt.Header) - if !reflect.DeepEqual(c, tt.Cookies) { - t.Errorf("#%d readSetCookies: have\n%s\nwant\n%s\n", i, toJSON(c), toJSON(tt.Cookies)) + c := readSetCookies(tt.header) + if !reflect.DeepEqual(c, tt.cookies) { + t.Errorf("#%d readSetCookies: have\n%s\nwant\n%s\n", i, toJSON(c), toJSON(tt.cookies)) } } } } var readCookiesTests = []struct { - Header Header - Filter string - Cookies []*Cookie + header Header + filter string + cookies []*Cookie + godebug string }{ { - Header{"Cookie": {"Cookie-1=v$1", "c2=v2"}}, - "", - []*Cookie{ + header: Header{"Cookie": {"Cookie-1=v$1", "c2=v2"}}, + filter: "", + cookies: []*Cookie{ {Name: "Cookie-1", Value: "v$1"}, {Name: "c2", Value: "v2"}, }, }, { - Header{"Cookie": {"Cookie-1=v$1", "c2=v2"}}, - "c2", - []*Cookie{ + header: Header{"Cookie": {"Cookie-1=v$1", "c2=v2"}}, + filter: "c2", + cookies: []*Cookie{ {Name: "c2", Value: "v2"}, }, }, { - Header{"Cookie": {"Cookie-1=v$1; c2=v2"}}, - "", - []*Cookie{ + header: Header{"Cookie": {"Cookie-1=v$1; c2=v2"}}, + filter: "", + cookies: []*Cookie{ {Name: "Cookie-1", Value: "v$1"}, {Name: "c2", Value: "v2"}, }, }, { - Header{"Cookie": {"Cookie-1=v$1; c2=v2"}}, - "c2", - []*Cookie{ + header: Header{"Cookie": {"Cookie-1=v$1; c2=v2"}}, + filter: "c2", + cookies: []*Cookie{ {Name: "c2", Value: "v2"}, }, }, { - Header{"Cookie": {`Cookie-1="v$1"; c2="v2"`}}, - "", - []*Cookie{ + header: Header{"Cookie": {`Cookie-1="v$1"; c2="v2"`}}, + filter: "", + cookies: []*Cookie{ {Name: "Cookie-1", Value: "v$1", Quoted: true}, {Name: "c2", Value: "v2", Quoted: true}, }, }, { - Header{"Cookie": {`Cookie-1="v$1"; c2=v2;`}}, - "", - []*Cookie{ + header: Header{"Cookie": {`Cookie-1="v$1"; c2=v2;`}}, + filter: "", + cookies: []*Cookie{ {Name: "Cookie-1", Value: "v$1", Quoted: true}, {Name: "c2", Value: "v2"}, }, }, { - Header{"Cookie": {``}}, - "", - []*Cookie{}, + header: Header{"Cookie": {``}}, + filter: "", + cookies: []*Cookie{}, + }, + // GODEBUG=httpcookiemaxnum should work regardless if all cookies are sent + // via one "Cookie" field, or multiple fields. 
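As the tests above indicate, the limit applies to the total cookie count whether it arrives in one header field or several, and it can be raised or disabled through GODEBUG. A hedged sketch of how a program might relax the default (the value 5000 is arbitrary; the setting is normally supplied via the environment rather than in code):

package main

import (
	"fmt"
	"net/http"
	"os"
	"strings"
)

func main() {
	// Raise the parser's cookie cap for this process; "httpcookiemaxnum=0"
	// would remove the cap entirely.
	os.Setenv("GODEBUG", "httpcookiemaxnum=5000")

	line := strings.TrimSuffix(strings.Repeat("k=v; ", 4000), "; ")
	cookies, err := http.ParseCookie(line)
	if err != nil {
		fmt.Println("rejected:", err) // with the default limit of 3000 this would fail
		return
	}
	fmt.Println("parsed", len(cookies), "cookies")
}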
+ { + header: Header{"Cookie": {strings.Repeat(";a=", defaultCookieMaxNum+1)[1:]}}, + cookies: []*Cookie{}, + }, + { + header: Header{"Cookie": slices.Repeat([]string{"a="}, 10)}, + cookies: []*Cookie{}, + godebug: "httpcookiemaxnum=5", + }, + { + header: Header{"Cookie": {strings.Repeat(";a=", defaultCookieMaxNum+1)[1:]}}, + cookies: slices.Repeat([]*Cookie{{Name: "a", Value: "", Quoted: false}}, defaultCookieMaxNum+1), + godebug: "httpcookiemaxnum=0", + }, + { + header: Header{"Cookie": slices.Repeat([]string{"a="}, defaultCookieMaxNum+1)}, + cookies: slices.Repeat([]*Cookie{{Name: "a", Value: "", Quoted: false}}, defaultCookieMaxNum+1), + godebug: fmt.Sprintf("httpcookiemaxnum=%v", defaultCookieMaxNum+1), }, } func TestReadCookies(t *testing.T) { for i, tt := range readCookiesTests { + t.Setenv("GODEBUG", tt.godebug) for n := 0; n < 2; n++ { // to verify readCookies doesn't mutate its input - c := readCookies(tt.Header, tt.Filter) - if !reflect.DeepEqual(c, tt.Cookies) { - t.Errorf("#%d readCookies:\nhave: %s\nwant: %s\n", i, toJSON(c), toJSON(tt.Cookies)) + c := readCookies(tt.header, tt.filter) + if !reflect.DeepEqual(c, tt.cookies) { + t.Errorf("#%d readCookies:\nhave: %s\nwant: %s\n", i, toJSON(c), toJSON(tt.cookies)) } } } @@ -689,6 +734,7 @@ func TestParseCookie(t *testing.T) { line string cookies []*Cookie err error + godebug string }{ { line: "Cookie-1=v$1", @@ -722,8 +768,28 @@ func TestParseCookie(t *testing.T) { line: "k1=\\", err: errInvalidCookieValue, }, + { + line: strings.Repeat(";a=", defaultCookieMaxNum+1)[1:], + err: errCookieNumLimitExceeded, + }, + { + line: strings.Repeat(";a=", 10)[1:], + err: errCookieNumLimitExceeded, + godebug: "httpcookiemaxnum=5", + }, + { + line: strings.Repeat(";a=", defaultCookieMaxNum+1)[1:], + cookies: slices.Repeat([]*Cookie{{Name: "a", Value: "", Quoted: false}}, defaultCookieMaxNum+1), + godebug: "httpcookiemaxnum=0", + }, + { + line: strings.Repeat(";a=", defaultCookieMaxNum+1)[1:], + cookies: slices.Repeat([]*Cookie{{Name: "a", Value: "", Quoted: false}}, defaultCookieMaxNum+1), + godebug: fmt.Sprintf("httpcookiemaxnum=%v", defaultCookieMaxNum+1), + }, } for i, tt := range tests { + t.Setenv("GODEBUG", tt.godebug) gotCookies, gotErr := ParseCookie(tt.line) if !errors.Is(gotErr, tt.err) { t.Errorf("#%d ParseCookie got error %v, want error %v", i, gotErr, tt.err) diff --git a/src/net/http/csrf.go b/src/net/http/csrf.go index 5e1b686fd1ccde..d088b9b615145b 100644 --- a/src/net/http/csrf.go +++ b/src/net/http/csrf.go @@ -77,13 +77,21 @@ func (c *CrossOriginProtection) AddTrustedOrigin(origin string) error { return nil } -var noopHandler = HandlerFunc(func(w ResponseWriter, r *Request) {}) +type noopHandler struct{} + +func (noopHandler) ServeHTTP(ResponseWriter, *Request) {} + +var sentinelHandler Handler = &noopHandler{} // AddInsecureBypassPattern permits all requests that match the given pattern. -// The pattern syntax and precedence rules are the same as [ServeMux]. // -// AddInsecureBypassPattern can be called concurrently with other methods -// or request handling, and applies to future requests. +// The pattern syntax and precedence rules are the same as [ServeMux]. Only +// requests that match the pattern directly are permitted. Those that ServeMux +// would redirect to a pattern (e.g. after cleaning the path or adding a +// trailing slash) are not. +// +// AddInsecureBypassPattern can be called concurrently with other methods or +// request handling, and applies to future requests. 
func (c *CrossOriginProtection) AddInsecureBypassPattern(pattern string) { var bypass *ServeMux @@ -99,7 +107,7 @@ func (c *CrossOriginProtection) AddInsecureBypassPattern(pattern string) { } } - bypass.Handle(pattern, noopHandler) + bypass.Handle(pattern, sentinelHandler) } // SetDenyHandler sets a handler to invoke when a request is rejected. @@ -172,7 +180,7 @@ var ( // be deferred until the last moment. func (c *CrossOriginProtection) isRequestExempt(req *Request) bool { if bypass := c.bypass.Load(); bypass != nil { - if _, pattern := bypass.Handler(req); pattern != "" { + if h, _ := bypass.Handler(req); h == sentinelHandler { // The request matches a bypass pattern. return true } diff --git a/src/net/http/csrf_test.go b/src/net/http/csrf_test.go index 30986a43b9aae8..a29e3ae16dff6b 100644 --- a/src/net/http/csrf_test.go +++ b/src/net/http/csrf_test.go @@ -113,6 +113,11 @@ func TestCrossOriginProtectionPatternBypass(t *testing.T) { protection := http.NewCrossOriginProtection() protection.AddInsecureBypassPattern("/bypass/") protection.AddInsecureBypassPattern("/only/{foo}") + protection.AddInsecureBypassPattern("/no-trailing") + protection.AddInsecureBypassPattern("/yes-trailing/") + protection.AddInsecureBypassPattern("PUT /put-only/") + protection.AddInsecureBypassPattern("GET /get-only/") + protection.AddInsecureBypassPattern("POST /post-only/") handler := protection.Handler(okHandler) tests := []struct { @@ -126,13 +131,23 @@ func TestCrossOriginProtectionPatternBypass(t *testing.T) { {"non-bypass path without sec-fetch-site", "/api/", "", http.StatusForbidden}, {"non-bypass path with cross-site", "/api/", "cross-site", http.StatusForbidden}, - {"redirect to bypass path without ..", "/foo/../bypass/bar", "", http.StatusOK}, - {"redirect to bypass path with trailing slash", "/bypass", "", http.StatusOK}, + {"redirect to bypass path without ..", "/foo/../bypass/bar", "", http.StatusForbidden}, + {"redirect to bypass path with trailing slash", "/bypass", "", http.StatusForbidden}, {"redirect to non-bypass path with ..", "/foo/../api/bar", "", http.StatusForbidden}, {"redirect to non-bypass path with trailing slash", "/api", "", http.StatusForbidden}, {"wildcard bypass", "/only/123", "", http.StatusOK}, {"non-wildcard", "/only/123/foo", "", http.StatusForbidden}, + + // https://go.dev/issue/75054 + {"no trailing slash exact match", "/no-trailing", "", http.StatusOK}, + {"no trailing slash with slash", "/no-trailing/", "", http.StatusForbidden}, + {"yes trailing slash exact match", "/yes-trailing/", "", http.StatusOK}, + {"yes trailing slash without slash", "/yes-trailing", "", http.StatusForbidden}, + + {"method-specific hit", "/post-only/", "", http.StatusOK}, + {"method-specific miss (PUT)", "/put-only/", "", http.StatusForbidden}, + {"method-specific miss (GET)", "/get-only/", "", http.StatusForbidden}, } for _, tc := range tests { diff --git a/src/net/http/transport.go b/src/net/http/transport.go index 07b3a9e1e72ba6..2778db332e4319 100644 --- a/src/net/http/transport.go +++ b/src/net/http/transport.go @@ -1372,7 +1372,10 @@ func (w *wantConn) cancel(t *Transport) { w.done = true w.mu.Unlock() - if pc != nil { + // HTTP/2 connections (pc.alt != nil) aren't removed from the idle pool on use, + // and should not be added back here. If the pconn isn't in the idle pool, + // it's because we removed it due to an error. 
+ if pc != nil && pc.alt == nil { t.putOrCloseIdleConn(pc) } } diff --git a/src/net/http/transport_test.go b/src/net/http/transport_test.go index 9762f058867dc1..28ad3eb6feb54f 100644 --- a/src/net/http/transport_test.go +++ b/src/net/http/transport_test.go @@ -7559,3 +7559,35 @@ func TestTransportServerProtocols(t *testing.T) { }) } } + +func TestIssue61474(t *testing.T) { + run(t, testIssue61474, []testMode{http2Mode}) +} +func testIssue61474(t *testing.T, mode testMode) { + if testing.Short() { + return + } + + // This test reliably exercises the condition causing #61474, + // but requires many iterations to do so. + // Keep the test around for now, but don't run it by default. + t.Skip("test is too large") + + cst := newClientServerTest(t, mode, HandlerFunc(func(rw ResponseWriter, req *Request) { + }), func(tr *Transport) { + tr.MaxConnsPerHost = 1 + }) + var wg sync.WaitGroup + defer wg.Wait() + for range 100000 { + wg.Go(func() { + ctx, cancel := context.WithTimeout(t.Context(), 1*time.Millisecond) + defer cancel() + req, _ := NewRequestWithContext(ctx, "GET", cst.ts.URL, nil) + resp, err := cst.c.Do(req) + if err == nil { + resp.Body.Close() + } + }) + } +} diff --git a/src/net/ipsock_posix.go b/src/net/ipsock_posix.go index 2aeabd44873f22..52712f932f7530 100644 --- a/src/net/ipsock_posix.go +++ b/src/net/ipsock_posix.go @@ -237,8 +237,12 @@ func ipToSockaddr(family int, ip IP, port int, zone string) (syscall.Sockaddr, e func addrPortToSockaddrInet4(ap netip.AddrPort) (syscall.SockaddrInet4, error) { // ipToSockaddrInet4 has special handling here for zero length slices. // We do not, because netip has no concept of a generic zero IP address. + // + // addr is allowed to be an IPv4-mapped IPv6 address. + // As4 will unmap it to an IPv4 address. + // The error message is kept consistent with ipToSockaddrInet4. addr := ap.Addr() - if !addr.Is4() { + if !addr.Is4() && !addr.Is4In6() { return syscall.SockaddrInet4{}, &AddrError{Err: "non-IPv4 address", Addr: addr.String()} } sa := syscall.SockaddrInet4{ diff --git a/src/net/mail/message.go b/src/net/mail/message.go index 14f839a03077c1..1502b3596252ba 100644 --- a/src/net/mail/message.go +++ b/src/net/mail/message.go @@ -724,7 +724,8 @@ func (p *addrParser) consumeDomainLiteral() (string, error) { } // Parse the dtext - var dtext string + dtext := p.s + dtextLen := 0 for { if p.empty() { return "", errors.New("mail: unclosed domain-literal") @@ -741,9 +742,10 @@ func (p *addrParser) consumeDomainLiteral() (string, error) { return "", fmt.Errorf("mail: bad character in domain-literal: %q", r) } - dtext += p.s[:size] + dtextLen += size p.s = p.s[size:] } + dtext = dtext[:dtextLen] // Skip the trailing ] if !p.consume(']') { diff --git a/src/net/textproto/reader.go b/src/net/textproto/reader.go index d375340121fa0b..94e3e0b53d115e 100644 --- a/src/net/textproto/reader.go +++ b/src/net/textproto/reader.go @@ -284,8 +284,10 @@ func (r *Reader) ReadCodeLine(expectCode int) (code int, message string, err err // // An expectCode <= 0 disables the check of the status code. 
func (r *Reader) ReadResponse(expectCode int) (code int, message string, err error) { - code, continued, message, err := r.readCodeLine(expectCode) + code, continued, first, err := r.readCodeLine(expectCode) multi := continued + var messageBuilder strings.Builder + messageBuilder.WriteString(first) for continued { line, err := r.ReadLine() if err != nil { @@ -296,12 +298,15 @@ func (r *Reader) ReadResponse(expectCode int) (code int, message string, err err var moreMessage string code2, continued, moreMessage, err = parseCodeLine(line, 0) if err != nil || code2 != code { - message += "\n" + strings.TrimRight(line, "\r\n") + messageBuilder.WriteByte('\n') + messageBuilder.WriteString(strings.TrimRight(line, "\r\n")) continued = true continue } - message += "\n" + moreMessage + messageBuilder.WriteByte('\n') + messageBuilder.WriteString(moreMessage) } + message = messageBuilder.String() if err != nil && multi && message != "" { // replace one line error message with all lines (full message) err = &Error{code, message} diff --git a/src/net/udpsock_test.go b/src/net/udpsock_test.go index 6dacc81df6e059..3c8da43bdd9b61 100644 --- a/src/net/udpsock_test.go +++ b/src/net/udpsock_test.go @@ -705,3 +705,40 @@ func TestIPv6WriteMsgUDPAddrPortTargetAddrIPVersion(t *testing.T) { t.Fatal(err) } } + +// TestIPv4WriteMsgUDPAddrPortTargetAddrIPVersion verifies that +// WriteMsgUDPAddrPort accepts IPv4 and IPv4-mapped IPv6 destination addresses, +// and rejects IPv6 destination addresses on a "udp4" connection. +func TestIPv4WriteMsgUDPAddrPortTargetAddrIPVersion(t *testing.T) { + switch runtime.GOOS { + case "plan9": + t.Skipf("not supported on %s", runtime.GOOS) + } + + if !testableNetwork("udp4") { + t.Skipf("skipping: udp4 not available") + } + + conn, err := ListenUDP("udp4", &UDPAddr{IP: IPv4(127, 0, 0, 1)}) + if err != nil { + t.Fatal(err) + } + defer conn.Close() + + daddr4 := netip.AddrPortFrom(netip.MustParseAddr("127.0.0.1"), 12345) + daddr4in6 := netip.AddrPortFrom(netip.MustParseAddr("::ffff:127.0.0.1"), 12345) + daddr6 := netip.AddrPortFrom(netip.MustParseAddr("::1"), 12345) + buf := make([]byte, 8) + + if _, _, err = conn.WriteMsgUDPAddrPort(buf, nil, daddr4); err != nil { + t.Errorf("conn.WriteMsgUDPAddrPort(buf, nil, daddr4) failed: %v", err) + } + + if _, _, err = conn.WriteMsgUDPAddrPort(buf, nil, daddr4in6); err != nil { + t.Errorf("conn.WriteMsgUDPAddrPort(buf, nil, daddr4in6) failed: %v", err) + } + + if _, _, err = conn.WriteMsgUDPAddrPort(buf, nil, daddr6); err == nil { + t.Errorf("conn.WriteMsgUDPAddrPort(buf, nil, daddr6) should have failed, but got no error") + } +} diff --git a/src/net/url/url.go b/src/net/url/url.go index 2a57659460373d..1c50e069613a81 100644 --- a/src/net/url/url.go +++ b/src/net/url/url.go @@ -16,6 +16,7 @@ import ( "errors" "fmt" "maps" + "net/netip" "path" "slices" "strconv" @@ -626,40 +627,61 @@ func parseAuthority(authority string) (user *Userinfo, host string, err error) { // parseHost parses host as an authority without user // information. That is, as host[:port]. func parseHost(host string) (string, error) { - if strings.HasPrefix(host, "[") { + if openBracketIdx := strings.LastIndex(host, "["); openBracketIdx != -1 { // Parse an IP-Literal in RFC 3986 and RFC 6874. // E.g., "[fe80::1]", "[fe80::1%25en0]", "[fe80::1]:80". 
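The textproto change above replaces repeated string concatenation with a strings.Builder while assembling multi-line reply text, avoiding a fresh copy of the whole message for every continuation line. A generic sketch of the pattern (the reply lines are illustrative):

package main

import (
	"fmt"
	"strings"
)

func main() {
	lines := []string{"250-server.example greets you", "250-SIZE 35882577", "250 OK"}

	// Instead of message += "\n" + line (quadratic copying), accumulate into a
	// Builder and materialize the string once at the end.
	var b strings.Builder
	for i, line := range lines {
		if i > 0 {
			b.WriteByte('\n')
		}
		b.WriteString(line)
	}
	fmt.Println(b.String())
}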
- i := strings.LastIndex(host, "]") - if i < 0 { + closeBracketIdx := strings.LastIndex(host, "]") + if closeBracketIdx < 0 { return "", errors.New("missing ']' in host") } - colonPort := host[i+1:] + + colonPort := host[closeBracketIdx+1:] if !validOptionalPort(colonPort) { return "", fmt.Errorf("invalid port %q after host", colonPort) } + unescapedColonPort, err := unescape(colonPort, encodeHost) + if err != nil { + return "", err + } + hostname := host[openBracketIdx+1 : closeBracketIdx] + var unescapedHostname string // RFC 6874 defines that %25 (%-encoded percent) introduces // the zone identifier, and the zone identifier can use basically // any %-encoding it likes. That's different from the host, which // can only %-encode non-ASCII bytes. // We do impose some restrictions on the zone, to avoid stupidity // like newlines. - zone := strings.Index(host[:i], "%25") - if zone >= 0 { - host1, err := unescape(host[:zone], encodeHost) + zoneIdx := strings.Index(hostname, "%25") + if zoneIdx >= 0 { + hostPart, err := unescape(hostname[:zoneIdx], encodeHost) if err != nil { return "", err } - host2, err := unescape(host[zone:i], encodeZone) + zonePart, err := unescape(hostname[zoneIdx:], encodeZone) if err != nil { return "", err } - host3, err := unescape(host[i:], encodeHost) + unescapedHostname = hostPart + zonePart + } else { + var err error + unescapedHostname, err = unescape(hostname, encodeHost) if err != nil { return "", err } - return host1 + host2 + host3, nil } + + // Per RFC 3986, only a host identified by a valid + // IPv6 address can be enclosed by square brackets. + // This excludes any IPv4, but notably not IPv4-mapped addresses. + addr, err := netip.ParseAddr(unescapedHostname) + if err != nil { + return "", fmt.Errorf("invalid host: %w", err) + } + if addr.Is4() { + return "", errors.New("invalid IP-literal") + } + return "[" + unescapedHostname + "]" + unescapedColonPort, nil } else if i := strings.LastIndex(host, ":"); i != -1 { colonPort := host[i:] if !validOptionalPort(colonPort) { diff --git a/src/net/url/url_test.go b/src/net/url/url_test.go index 16e08b63c6d098..6084facacc0519 100644 --- a/src/net/url/url_test.go +++ b/src/net/url/url_test.go @@ -383,6 +383,16 @@ var urltests = []URLTest{ }, "", }, + // valid IPv6 host with port and path + { + "https://[2001:db8::1]:8443/test/path", + &URL{ + Scheme: "https", + Host: "[2001:db8::1]:8443", + Path: "/test/path", + }, + "", + }, // host subcomponent; IPv6 address with zone identifier in RFC 6874 { "http://[fe80::1%25en0]/", // alphanum zone identifier @@ -707,6 +717,24 @@ var parseRequestURLTests = []struct { // RFC 6874. 
{"http://[fe80::1%en0]/", false}, {"http://[fe80::1%en0]:8080/", false}, + + // Tests exercising RFC 3986 compliance + {"https://[1:2:3:4:5:6:7:8]", true}, // full IPv6 address + {"https://[2001:db8::a:b:c:d]", true}, // compressed IPv6 address + {"https://[fe80::1%25eth0]", true}, // link-local address with zone ID (interface name) + {"https://[fe80::abc:def%254]", true}, // link-local address with zone ID (interface index) + {"https://[2001:db8::1]/path", true}, // compressed IPv6 address with path + {"https://[fe80::1%25eth0]/path?query=1", true}, // link-local with zone, path, and query + + {"https://[::ffff:192.0.2.1]", true}, + {"https://[:1] ", false}, + {"https://[1:2:3:4:5:6:7:8:9]", false}, + {"https://[1::1::1]", false}, + {"https://[1:2:3:]", false}, + {"https://[ffff::127.0.0.4000]", false}, + {"https://[0:0::test.com]:80", false}, + {"https://[2001:db8::test.com]", false}, + {"https://[test.com]", false}, } func TestParseRequestURI(t *testing.T) { @@ -1643,6 +1671,18 @@ func TestParseErrors(t *testing.T) { {"cache_object:foo", true}, {"cache_object:foo/bar", true}, {"cache_object/:foo/bar", false}, + + {"http://[192.168.0.1]/", true}, // IPv4 in brackets + {"http://[192.168.0.1]:8080/", true}, // IPv4 in brackets with port + {"http://[::ffff:192.168.0.1]/", false}, // IPv4-mapped IPv6 in brackets + {"http://[::ffff:192.168.0.1000]/", true}, // Out of range IPv4-mapped IPv6 in brackets + {"http://[::ffff:192.168.0.1]:8080/", false}, // IPv4-mapped IPv6 in brackets with port + {"http://[::ffff:c0a8:1]/", false}, // IPv4-mapped IPv6 in brackets (hex) + {"http://[not-an-ip]/", true}, // invalid IP string in brackets + {"http://[fe80::1%foo]/", true}, // invalid zone format in brackets + {"http://[fe80::1", true}, // missing closing bracket + {"http://fe80::1]/", true}, // missing opening bracket + {"http://[test.com]/", true}, // domain name in brackets } for _, tt := range tests { u, err := Parse(tt.in) diff --git a/src/os/exec/lp_plan9.go b/src/os/exec/lp_plan9.go index 0430af9eefeb42..f713a6905cfbdc 100644 --- a/src/os/exec/lp_plan9.go +++ b/src/os/exec/lp_plan9.go @@ -36,7 +36,7 @@ func findExecutable(file string) error { // As of Go 1.19, LookPath will instead return that path along with an error satisfying // [errors.Is](err, [ErrDot]). See the package documentation for more details. func LookPath(file string) (string, error) { - if err := validateLookPath(file); err != nil { + if err := validateLookPath(filepath.Clean(file)); err != nil { return "", &Error{file, err} } diff --git a/src/os/os_windows_test.go b/src/os/os_windows_test.go index 515d1c135901e0..6f091c38184189 100644 --- a/src/os/os_windows_test.go +++ b/src/os/os_windows_test.go @@ -1845,6 +1845,72 @@ func TestFile(t *testing.T) { } } +func TestFileOverlappedSeek(t *testing.T) { + t.Parallel() + name := filepath.Join(t.TempDir(), "foo") + f := newFileOverlapped(t, name, true) + content := []byte("foo") + if _, err := f.Write(content); err != nil { + t.Fatal(err) + } + // Check that the file pointer is at the expected offset. + n, err := f.Seek(0, io.SeekCurrent) + if err != nil { + t.Fatal(err) + } + if n != int64(len(content)) { + t.Errorf("expected file pointer to be at offset %d, got %d", len(content), n) + } + // Set the file pointer to the start of the file. + if _, err := f.Seek(0, io.SeekStart); err != nil { + t.Fatal(err) + } + // Read the first byte. 
+ var buf [1]byte + if _, err := f.Read(buf[:]); err != nil { + t.Fatal(err) + } + if !bytes.Equal(buf[:], content[:len(buf)]) { + t.Errorf("expected %q, got %q", content[:len(buf)], buf[:]) + } + // Check that the file pointer is at the expected offset. + n, err = f.Seek(0, io.SeekCurrent) + if err != nil { + t.Fatal(err) + } + if n != int64(len(buf)) { + t.Errorf("expected file pointer to be at offset %d, got %d", len(buf), n) + } +} + +func TestFileOverlappedReadAtVolume(t *testing.T) { + // Test that we can use File.ReadAt with an overlapped volume handle. + // See https://go.dev/issues/74951. + t.Parallel() + name := `\\.\` + filepath.VolumeName(t.TempDir()) + namep, err := syscall.UTF16PtrFromString(name) + if err != nil { + t.Fatal(err) + } + h, err := syscall.CreateFile(namep, + syscall.GENERIC_READ|syscall.GENERIC_WRITE, + syscall.FILE_SHARE_WRITE|syscall.FILE_SHARE_READ, + nil, syscall.OPEN_ALWAYS, syscall.FILE_FLAG_OVERLAPPED, 0) + if err != nil { + if errors.Is(err, syscall.ERROR_ACCESS_DENIED) { + t.Skip("skipping test: access denied") + } + t.Fatal(err) + } + f := os.NewFile(uintptr(h), name) + defer f.Close() + + var buf [0]byte + if _, err := f.ReadAt(buf[:], 0); err != nil { + t.Fatal(err) + } +} + func TestPipe(t *testing.T) { t.Parallel() r, w, err := os.Pipe() diff --git a/src/os/path_windows_test.go b/src/os/path_windows_test.go index 3fa02e2a65b083..eea2b58ee0a886 100644 --- a/src/os/path_windows_test.go +++ b/src/os/path_windows_test.go @@ -236,6 +236,23 @@ func TestRemoveAllLongPathRelative(t *testing.T) { } } +func TestRemoveAllFallback(t *testing.T) { + windows.TestDeleteatFallback = true + t.Cleanup(func() { windows.TestDeleteatFallback = false }) + + dir := t.TempDir() + if err := os.WriteFile(filepath.Join(dir, "file1"), []byte{}, 0700); err != nil { + t.Fatal(err) + } + if err := os.WriteFile(filepath.Join(dir, "file2"), []byte{}, 0400); err != nil { // read-only file + t.Fatal(err) + } + + if err := os.RemoveAll(dir); err != nil { + t.Fatal(err) + } +} + func testLongPathAbs(t *testing.T, target string) { t.Helper() testWalkFn := func(path string, info os.FileInfo, err error) error { diff --git a/src/os/root_openat.go b/src/os/root_openat.go index e433bd5093fdb0..83bde5ef14160d 100644 --- a/src/os/root_openat.go +++ b/src/os/root_openat.go @@ -131,7 +131,9 @@ func rootMkdirAll(r *Root, fullname string, perm FileMode) error { if try > 0 || !IsNotExist(err) { return 0, &PathError{Op: "openat", Err: err} } - if err := mkdirat(parent, name, perm); err != nil { + // Try again on EEXIST, because the directory may have been created + // by another process or thread between the rootOpenDir and mkdirat calls. 
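The rootMkdirAll fix above tolerates EEXIST from mkdirat because another process or thread can create the directory between the failed open and the create attempt; that is the same property that makes plain MkdirAll safe to call concurrently. A tiny sketch of the pattern at the os level (the directory name is illustrative):

package main

import (
	"log"
	"os"
	"path/filepath"
	"sync"
)

func main() {
	dir := filepath.Join(os.TempDir(), "mkdirall-race-demo")

	// Several goroutines race to create the same tree; MkdirAll treats
	// "already exists" as success, so none of them should report an error.
	var wg sync.WaitGroup
	for i := 0; i < 8; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			if err := os.MkdirAll(filepath.Join(dir, "a", "b", "c"), 0o755); err != nil {
				log.Println("unexpected error:", err)
			}
		}()
	}
	wg.Wait()
	os.RemoveAll(dir)
}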
+ if err := mkdirat(parent, name, perm); err != nil && err != syscall.EEXIST { return 0, &PathError{Op: "mkdirat", Err: err} } } diff --git a/src/os/root_test.go b/src/os/root_test.go index effcdeab433e78..f9fbd11575e6a0 100644 --- a/src/os/root_test.go +++ b/src/os/root_test.go @@ -1919,3 +1919,36 @@ func TestRootWriteReadFile(t *testing.T) { t.Fatalf("root.ReadFile(%q) = %q, %v; want %q, nil", name, got, err, want) } } + +func TestRootName(t *testing.T) { + dir := t.TempDir() + root, err := os.OpenRoot(dir) + if err != nil { + t.Fatal(err) + } + defer root.Close() + if got, want := root.Name(), dir; got != want { + t.Errorf("root.Name() = %q, want %q", got, want) + } + + f, err := root.Create("file") + if err != nil { + t.Fatal(err) + } + defer f.Close() + if got, want := f.Name(), filepath.Join(dir, "file"); got != want { + t.Errorf(`root.Create("file").Name() = %q, want %q`, got, want) + } + + if err := root.Mkdir("dir", 0o777); err != nil { + t.Fatal(err) + } + subroot, err := root.OpenRoot("dir") + if err != nil { + t.Fatal(err) + } + defer subroot.Close() + if got, want := subroot.Name(), filepath.Join(dir, "dir"); got != want { + t.Errorf(`root.OpenRoot("dir").Name() = %q, want %q`, got, want) + } +} diff --git a/src/os/root_unix.go b/src/os/root_unix.go index 4d6fc19a080434..c891e81b793bb5 100644 --- a/src/os/root_unix.go +++ b/src/os/root_unix.go @@ -75,7 +75,7 @@ func openRootInRoot(r *Root, name string) (*Root, error) { if err != nil { return nil, &PathError{Op: "openat", Path: name, Err: err} } - return newRoot(fd, name) + return newRoot(fd, joinPath(r.Name(), name)) } // rootOpenFileNolog is Root.OpenFile. diff --git a/src/os/root_windows.go b/src/os/root_windows.go index 523ee48d13f8c0..2ec09cf2d3f48d 100644 --- a/src/os/root_windows.go +++ b/src/os/root_windows.go @@ -123,7 +123,7 @@ func openRootInRoot(r *Root, name string) (*Root, error) { if err != nil { return nil, &PathError{Op: "openat", Path: name, Err: err} } - return newRoot(fd, name) + return newRoot(fd, joinPath(r.Name(), name)) } // rootOpenFileNolog is Root.OpenFile. diff --git a/src/runtime/chan.go b/src/runtime/chan.go index bb554ebfdb1f3a..639d29dc8337f0 100644 --- a/src/runtime/chan.go +++ b/src/runtime/chan.go @@ -191,7 +191,7 @@ func chansend(c *hchan, ep unsafe.Pointer, block bool, callerpc uintptr) bool { } if c.bubble != nil && getg().bubble != c.bubble { - panic(plainError("send on synctest channel from outside bubble")) + fatal("send on synctest channel from outside bubble") } // Fast path: check for failed non-blocking operation without acquiring the lock. 
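The root_test.go addition above pins down what Root.Name and the names of files opened through a Root should report: paths joined onto the directory the Root was opened on. A hedged sketch of that contract using the public os.Root API:

package main

import (
	"fmt"
	"log"
	"os"
	"path/filepath"
)

func main() {
	dir, err := os.MkdirTemp("", "rootdemo")
	if err != nil {
		log.Fatal(err)
	}
	defer os.RemoveAll(dir)

	root, err := os.OpenRoot(dir)
	if err != nil {
		log.Fatal(err)
	}
	defer root.Close()

	f, err := root.Create("file")
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	// Both names are reported relative to the original root directory.
	fmt.Println(root.Name() == dir)                     // true
	fmt.Println(f.Name() == filepath.Join(dir, "file")) // true
}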
@@ -318,7 +318,7 @@ func chansend(c *hchan, ep unsafe.Pointer, block bool, callerpc uintptr) bool { func send(c *hchan, sg *sudog, ep unsafe.Pointer, unlockf func(), skip int) { if c.bubble != nil && getg().bubble != c.bubble { unlockf() - panic(plainError("send on synctest channel from outside bubble")) + fatal("send on synctest channel from outside bubble") } if raceenabled { if c.dataqsiz == 0 { @@ -416,7 +416,7 @@ func closechan(c *hchan) { panic(plainError("close of nil channel")) } if c.bubble != nil && getg().bubble != c.bubble { - panic(plainError("close of synctest channel from outside bubble")) + fatal("close of synctest channel from outside bubble") } lock(&c.lock) @@ -538,7 +538,7 @@ func chanrecv(c *hchan, ep unsafe.Pointer, block bool) (selected, received bool) } if c.bubble != nil && getg().bubble != c.bubble { - panic(plainError("receive on synctest channel from outside bubble")) + fatal("receive on synctest channel from outside bubble") } if c.timer != nil { @@ -702,7 +702,7 @@ func chanrecv(c *hchan, ep unsafe.Pointer, block bool) (selected, received bool) func recv(c *hchan, sg *sudog, ep unsafe.Pointer, unlockf func(), skip int) { if c.bubble != nil && getg().bubble != c.bubble { unlockf() - panic(plainError("receive on synctest channel from outside bubble")) + fatal("receive on synctest channel from outside bubble") } if c.dataqsiz == 0 { if raceenabled { diff --git a/src/runtime/decoratemappings_test.go b/src/runtime/decoratemappings_test.go new file mode 100644 index 00000000000000..7d1121c125df08 --- /dev/null +++ b/src/runtime/decoratemappings_test.go @@ -0,0 +1,72 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package runtime_test + +import ( + "os" + "regexp" + "runtime" + "testing" +) + +func validateMapLabels(t *testing.T, labels []string) { + // These are the specific region labels that need get added during the + // runtime phase. Hence they are the ones that need to be confirmed as + // present at the time the test reads its own region labels, which + // is sufficient to validate that the default `decoratemappings` value + // (enabled) was set early enough in the init process. 
+ regions := map[string]bool{ + "allspans array": false, + "gc bits": false, + "heap": false, + "heap index": false, + "heap reservation": false, + "immortal metadata": false, + "page alloc": false, + "page alloc index": false, + "page summary": false, + "scavenge index": false, + } + for _, label := range labels { + if _, ok := regions[label]; !ok { + t.Logf("unexpected region label found: \"%s\"", label) + } + regions[label] = true + } + for label, found := range regions { + if !found { + t.Logf("region label missing: \"%s\"", label) + } + } +} + +func TestDecorateMappings(t *testing.T) { + if runtime.GOOS != "linux" { + t.Skip("decoratemappings is only supported on Linux") + // /proc/self/maps is also Linux-specific + } + + var labels []string + if rawMaps, err := os.ReadFile("/proc/self/maps"); err != nil { + t.Fatalf("failed to read /proc/self/maps: %v", err) + } else { + t.Logf("maps:%s\n", string(rawMaps)) + matches := regexp.MustCompile("[^[]+ \\[anon: Go: (.+)\\]\n").FindAllSubmatch(rawMaps, -1) + for _, match_pair := range matches { + // match_pair consists of the matching substring and the parenthesized group + labels = append(labels, string(match_pair[1])) + } + } + t.Logf("DebugDecorateMappings: %v", *runtime.DebugDecorateMappings) + if *runtime.DebugDecorateMappings != 0 && runtime.SetVMANameSupported() { + validateMapLabels(t, labels) + } else { + if len(labels) > 0 { + t.Errorf("unexpected mapping labels present: %v", labels) + } else { + t.Skip("mapping labels absent as expected") + } + } +} diff --git a/src/runtime/export_test.go b/src/runtime/export_test.go index 9a4611e26e52a2..6559ecec67bbde 100644 --- a/src/runtime/export_test.go +++ b/src/runtime/export_test.go @@ -1927,3 +1927,7 @@ func (t *TraceStackTable) Reset() { func TraceStack(gp *G, tab *TraceStackTable) { traceStack(0, gp, (*traceStackTable)(tab)) } + +var DebugDecorateMappings = &debug.decoratemappings + +func SetVMANameSupported() bool { return setVMANameSupported() } diff --git a/src/runtime/metrics/doc.go b/src/runtime/metrics/doc.go index a1902bc6d78c7f..251ee22fa1a51b 100644 --- a/src/runtime/metrics/doc.go +++ b/src/runtime/metrics/doc.go @@ -282,6 +282,11 @@ Below is the full list of supported metrics, ordered lexicographically. The number of non-default behaviors executed by the net/http package due to a non-default GODEBUG=http2server=... setting. + /godebug/non-default-behavior/httpcookiemaxnum:events + The number of non-default behaviors executed by the net/http + package due to a non-default GODEBUG=httpcookiemaxnum=... + setting. + /godebug/non-default-behavior/httplaxcontentlength:events The number of non-default behaviors executed by the net/http package due to a non-default GODEBUG=httplaxcontentlength=... diff --git a/src/runtime/proc.go b/src/runtime/proc.go index b41bbe93cf57c7..d898c9668b36dd 100644 --- a/src/runtime/proc.go +++ b/src/runtime/proc.go @@ -789,7 +789,9 @@ func cpuinit(env string) { // getGodebugEarly extracts the environment variable GODEBUG from the environment on // Unix-like operating systems and returns it. This function exists to extract GODEBUG // early before much of the runtime is initialized. -func getGodebugEarly() string { +// +// Returns nil, false if OS doesn't provide env vars early in the init sequence. 
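The metrics/doc.go entry above registers a counter for non-default httpcookiemaxnum behavior; like every /godebug/non-default-behavior/... metric it can be read with runtime/metrics. A short sketch (the counter only advances once parsing actually diverges from the default limit):

package main

import (
	"fmt"
	"runtime/metrics"
)

func main() {
	const name = "/godebug/non-default-behavior/httpcookiemaxnum:events"

	sample := []metrics.Sample{{Name: name}}
	metrics.Read(sample)

	if sample[0].Value.Kind() == metrics.KindBad {
		fmt.Println("metric not supported by this toolchain")
		return
	}
	fmt.Printf("%s = %d\n", name, sample[0].Value.Uint64())
}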
+func getGodebugEarly() (string, bool) { const prefix = "GODEBUG=" var env string switch GOOS { @@ -807,12 +809,16 @@ func getGodebugEarly() string { s := unsafe.String(p, findnull(p)) if stringslite.HasPrefix(s, prefix) { - env = gostring(p)[len(prefix):] + env = gostringnocopy(p)[len(prefix):] break } } + break + + default: + return "", false } - return env + return env, true } // The bootstrap sequence is: @@ -859,11 +865,14 @@ func schedinit() { // The world starts stopped. worldStopped() + godebug, parsedGodebug := getGodebugEarly() + if parsedGodebug { + parseRuntimeDebugVars(godebug) + } ticks.init() // run as early as possible moduledataverify() stackinit() mallocinit() - godebug := getGodebugEarly() cpuinit(godebug) // must run before alginit randinit() // must run before alginit, mcommoninit alginit() // maps, hash, rand must not be used before this call @@ -880,7 +889,12 @@ func schedinit() { goenvs() secure() checkfds() - parsedebugvars() + if !parsedGodebug { + // Some platforms, e.g., Windows, didn't make env vars available "early", + // so try again now. + parseRuntimeDebugVars(gogetenv("GODEBUG")) + } + finishDebugVarsSetup() gcinit() // Allocate stack space that can be used when crashing due to bad stack diff --git a/src/runtime/runtime1.go b/src/runtime/runtime1.go index 424745d2357dc9..15b546783b5e53 100644 --- a/src/runtime/runtime1.go +++ b/src/runtime/runtime1.go @@ -402,7 +402,7 @@ var dbgvars = []*dbgVar{ {name: "updatemaxprocs", value: &debug.updatemaxprocs, def: 1}, } -func parsedebugvars() { +func parseRuntimeDebugVars(godebug string) { // defaults debug.cgocheck = 1 debug.invalidptr = 1 @@ -420,12 +420,6 @@ func parsedebugvars() { } debug.traceadvanceperiod = defaultTraceAdvancePeriod - godebug := gogetenv("GODEBUG") - - p := new(string) - *p = godebug - godebugEnv.Store(p) - // apply runtime defaults, if any for _, v := range dbgvars { if v.def != 0 { @@ -437,7 +431,6 @@ func parsedebugvars() { } } } - // apply compile-time GODEBUG settings parsegodebug(godebugDefault, nil) @@ -463,6 +456,12 @@ func parsedebugvars() { if debug.gccheckmark > 0 { debug.asyncpreemptoff = 1 } +} + +func finishDebugVarsSetup() { + p := new(string) + *p = gogetenv("GODEBUG") + godebugEnv.Store(p) setTraceback(gogetenv("GOTRACEBACK")) traceback_env = traceback_cache diff --git a/src/runtime/select.go b/src/runtime/select.go index ae7754b17377dd..113dc8ad19e984 100644 --- a/src/runtime/select.go +++ b/src/runtime/select.go @@ -178,7 +178,7 @@ func selectgo(cas0 *scase, order0 *uint16, pc0 *uintptr, nsends, nrecvs int, blo if cas.c.bubble != nil { if getg().bubble != cas.c.bubble { - panic(plainError("select on synctest channel from outside bubble")) + fatal("select on synctest channel from outside bubble") } } else { allSynctest = false diff --git a/src/runtime/set_vma_name_linux.go b/src/runtime/set_vma_name_linux.go index 100c2bfecac60c..792d6ad34d9102 100644 --- a/src/runtime/set_vma_name_linux.go +++ b/src/runtime/set_vma_name_linux.go @@ -14,9 +14,13 @@ import ( var prSetVMAUnsupported atomic.Bool +func setVMANameSupported() bool { + return !prSetVMAUnsupported.Load() +} + // setVMAName calls prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, start, len, name) func setVMAName(start unsafe.Pointer, length uintptr, name string) { - if debug.decoratemappings == 0 || prSetVMAUnsupported.Load() { + if debug.decoratemappings == 0 || !setVMANameSupported() { return } diff --git a/src/runtime/set_vma_name_stub.go b/src/runtime/set_vma_name_stub.go index 38f65fd592ba34..6cb01ebf50dcef 100644 --- 
a/src/runtime/set_vma_name_stub.go +++ b/src/runtime/set_vma_name_stub.go @@ -10,3 +10,5 @@ import "unsafe" // setVMAName isn’t implemented func setVMAName(start unsafe.Pointer, len uintptr, name string) {} + +func setVMANameSupported() bool { return false } diff --git a/src/runtime/synctest.go b/src/runtime/synctest.go index 16af1209b4a5f7..529f69fd9309b9 100644 --- a/src/runtime/synctest.go +++ b/src/runtime/synctest.go @@ -410,7 +410,9 @@ func getOrSetBubbleSpecial(p unsafe.Pointer, bubbleid uint64, add bool) (assoc i } else if add { // p is not associated with a bubble, // and we've been asked to add an association. + lock(&mheap_.speciallock) s := (*specialBubble)(mheap_.specialBubbleAlloc.alloc()) + unlock(&mheap_.speciallock) s.bubbleid = bubbleid s.special.kind = _KindSpecialBubble s.special.offset = offset diff --git a/src/runtime/tagptr_64bit.go b/src/runtime/tagptr_64bit.go index 3d79332e2dcaff..76733cc1d64630 100644 --- a/src/runtime/tagptr_64bit.go +++ b/src/runtime/tagptr_64bit.go @@ -22,10 +22,17 @@ const ( // On AMD64, virtual addresses are 48-bit (or 57-bit) sign-extended. // Other archs are 48-bit zero-extended. // + // We use one extra bit to placate systems which simulate amd64 binaries on + // an arm64 host. Allocated arm64 addresses could be as high as 1<<48-1, + // which would be invalid if we assumed 48-bit sign-extended addresses. + // See issue 69255. + // (Note that this does not help the other way around, simulating arm64 + // on amd64, but we don't have that problem at the moment.) + // // On s390x, virtual addresses are 64-bit. There's not much we // can do about this, so we just hope that the kernel doesn't // get to really high addresses and panic if it does. - defaultAddrBits = 48 + defaultAddrBits = 48 + 1 // On AIX, 64-bit addresses are split into 36-bit segment number and 28-bit // offset in segment. Segment numbers in the range 0x0A0000000-0x0AFFFFFFF(LSA) diff --git a/src/runtime/time.go b/src/runtime/time.go index 4880dce8cddc79..e9d1f0b6c9a10f 100644 --- a/src/runtime/time.go +++ b/src/runtime/time.go @@ -415,7 +415,7 @@ func newTimer(when, period int64, f func(arg any, seq uintptr, delay int64), arg //go:linkname stopTimer time.stopTimer func stopTimer(t *timeTimer) bool { if t.isFake && getg().bubble == nil { - panic("stop of synctest timer from outside bubble") + fatal("stop of synctest timer from outside bubble") } return t.stop() } @@ -430,7 +430,7 @@ func resetTimer(t *timeTimer, when, period int64) bool { racerelease(unsafe.Pointer(&t.timer)) } if t.isFake && getg().bubble == nil { - panic("reset of synctest timer from outside bubble") + fatal("reset of synctest timer from outside bubble") } return t.reset(when, period) } diff --git a/src/sync/atomic/type.go b/src/sync/atomic/type.go index 40a29fed8ce3bb..4a74150b41bace 100644 --- a/src/sync/atomic/type.go +++ b/src/sync/atomic/type.go @@ -232,7 +232,7 @@ func (x *Uintptr) Add(delta uintptr) (new uintptr) { return AddUintptr(&x.v, del func (x *Uintptr) And(mask uintptr) (old uintptr) { return AndUintptr(&x.v, mask) } // Or atomically performs a bitwise OR operation on x using the bitmask -// provided as mask and returns the updated value after the OR operation. +// provided as mask and returns the old value.
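The sync/atomic doc fix above matters because Or (like And) returns the value held before the operation, not the updated one. A two-line illustration:

package main

import (
	"fmt"
	"sync/atomic"
)

func main() {
	var x atomic.Uint32
	x.Store(0b0001)

	old := x.Or(0b0110) // returns the previous value
	fmt.Printf("old=%04b new=%04b\n", old, x.Load()) // old=0001 new=0111
}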
func (x *Uintptr) Or(mask uintptr) (old uintptr) { return OrUintptr(&x.v, mask) } // noCopy may be added to structs which must not be copied diff --git a/test/fixedbugs/issue75569.go b/test/fixedbugs/issue75569.go new file mode 100644 index 00000000000000..8420641db2234a --- /dev/null +++ b/test/fixedbugs/issue75569.go @@ -0,0 +1,77 @@ +// run + +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +func fff(a []int, b bool, p, q *int) { +outer: + n := a[0] + a = a[1:] + switch n { + case 1: + goto one + case 2: + goto two + case 3: + goto three + case 4: + goto four + } + +one: + goto inner +two: + goto outer +three: + goto inner +four: + goto innerSideEntry + +inner: + n = a[0] + a = a[1:] + switch n { + case 1: + goto outer + case 2: + goto inner + case 3: + goto innerSideEntry + default: + return + } +innerSideEntry: + n = a[0] + a = a[1:] + switch n { + case 1: + goto outer + case 2: + goto inner + case 3: + goto inner + } + ggg(p, q) + goto inner +} + +var b bool + +func ggg(p, q *int) { + n := *p + 5 // this +5 ends up in the entry block, well before the *p load + if b { + *q = 0 + } + *p = n +} + +func main() { + var x, y int + fff([]int{4, 4, 4}, false, &x, &y) + if x != 5 { + panic(x) + } +} diff --git a/test/fixedbugs/issue76008.go b/test/fixedbugs/issue76008.go new file mode 100644 index 00000000000000..bdf273bca1e81f --- /dev/null +++ b/test/fixedbugs/issue76008.go @@ -0,0 +1,35 @@ +// run + +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +import "runtime" + +func main() { + shouldPanic(func() { + g = any(func() {}) == any(func() {}) + }) + shouldPanic(func() { + g = any(map[int]int{}) == any(map[int]int{}) + }) + shouldPanic(func() { + g = any([]int{}) == any([]int{}) + }) +} + +var g bool + +func shouldPanic(f func()) { + defer func() { + err := recover() + if err == nil { + _, _, line, _ := runtime.Caller(2) + println("did not panic at line", line+1) + } + }() + + f() +}
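The issue76008 test above checks that comparing interface values whose dynamic types are uncomparable (funcs, maps, slices) panics at run time rather than misbehaving. A minimal reproduction outside the test harness:

package main

import "fmt"

func main() {
	defer func() {
		// Expected: "runtime error: comparing uncomparable type []int"
		fmt.Println("recovered:", recover())
	}()

	var a, b any = []int{1}, []int{1}
	fmt.Println(a == b) // compiles (both operands are interfaces) but panics at run time
}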