chore: bump minimum Go version to 1.25 (#7788)

Author: Ville Vesilehto
Date: 2026-02-16 15:28:30 +02:00
Committed by: GitHub
Parent: b1080a2934
Commit: 6fd38dca06
9 changed files with 26 additions and 50 deletions


@@ -57,7 +57,7 @@ out-of-tree plugins.
 To compile CoreDNS, we assume you have a working Go setup. See various tutorials if you don't have
 that already configured.
-First, make sure your golang version is 1.24.0 or higher as `go mod` support and other api is needed.
+First, make sure your golang version is 1.25.0 or higher as `go mod` support and other api is needed.
 See [here](https://github.com/golang/go/wiki/Modules) for `go mod` details.
 Then, check out the project and run `make` to compile the binary:


@@ -229,11 +229,9 @@ func (s *Server) Stop() error {
 			continue
 		}
-		wg.Add(1)
-		go func() {
+		wg.Go(func() {
 			s1.ShutdownContext(ctx)
-			wg.Done()
-		}()
+		})
 	}
 	s.m.Unlock()
 	wg.Wait()
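For reference, this hunk shows the pattern every file in this commit follows: Go 1.25 adds a `Go` method to `sync.WaitGroup` that runs a function in its own goroutine and does the `Add(1)`/`Done` bookkeeping itself. A minimal sketch of the before/after (an illustration, not code from this commit):

```go
package main

import (
	"fmt"
	"sync"
)

func main() {
	var wg sync.WaitGroup

	// Pre-1.25 pattern: bookkeeping is manual.
	wg.Add(1)
	go func() {
		defer wg.Done()
		fmt.Println("old style")
	}()

	// Go 1.25+: wg.Go calls Add(1) before starting the goroutine
	// and Done when the function returns.
	wg.Go(func() {
		fmt.Println("new style")
	})

	wg.Wait()
}
```

Because `Done` is called automatically when the function returns, the trailing `wg.Done()` and `defer wg.Done()` lines in the old code simply disappear.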

go.mod

@@ -2,7 +2,7 @@ module github.com/coredns/coredns
 // Note this minimum version requirement. CoreDNS supports the last two
 // Go versions. This follows the upstream Go project support.
-go 1.24.0
+go 1.25.0
 require (
 	github.com/Azure/azure-sdk-for-go v68.0.0+incompatible


@@ -363,13 +363,11 @@ func TestCloudDNSConcurrentServeDNS(t *testing.T) {
 	var wg sync.WaitGroup
 	// Concurrently refresh zones to race with Lookup reads.
-	wg.Add(1)
-	go func() {
-		defer wg.Done()
+	wg.Go(func() {
 		for range 50 {
 			_ = r.updateZones(ctx)
 		}
-	}()
+	})
 	const workers = 32
 	const iterations = 200


@@ -67,11 +67,9 @@ func TestTransport(t *testing.T) {
 		}
 		var wg sync.WaitGroup
-		wg.Add(1)
-		go func() {
+		wg.Go(func() {
 			accept(t, l, 1)
-			wg.Done()
-		}()
+		})
 		dio := newIO(param[0], l.Addr().String(), 1, 1)
 		dio.tcpTimeout = 10 * time.Millisecond
@@ -97,11 +95,9 @@ func TestRace(t *testing.T) {
 	defer l.Close()
 	var wg sync.WaitGroup
-	wg.Add(1)
-	go func() {
+	wg.Go(func() {
 		accept(t, l, count)
-		wg.Done()
-	}()
+	})
 	dio := newIO("tcp", l.Addr().String(), 1, 1)
 	dio.tcpTimeout = 10 * time.Millisecond
@@ -132,11 +128,9 @@ func TestReconnect(t *testing.T) {
 	}
 	var wg sync.WaitGroup
-	wg.Add(1)
-	go func() {
+	wg.Go(func() {
 		accept(t, l, 1)
-		wg.Done()
-	}()
+	})
 	addr := l.Addr().String()
 	logger := MockLogger{}
@@ -164,11 +158,9 @@ func TestReconnect(t *testing.T) {
 	}
 	defer l.Close()
-	wg.Add(1)
-	go func() {
+	wg.Go(func() {
 		accept(t, l, 1)
-		wg.Done()
-	}()
+	})
 	messageCount := 5
 	for range messageCount {
@@ -249,11 +241,9 @@ func TestFullQueueWriteFail(t *testing.T) {
 	defer l.Close()
 	var wg sync.WaitGroup
-	wg.Add(1)
-	go func() {
+	wg.Go(func() {
 		accept(t, l, 1)
-		wg.Done()
-	}()
+	})
 	logger := MockLogger{}
 	dio := newIO("unix", l.Addr().String(), 1, 1)


@@ -48,10 +48,8 @@ func xfr(state request.Request, truncate bool) {
 	}()
 	wg := new(sync.WaitGroup)
-	wg.Add(1)
-	go func() {
+	wg.Go(func() {
 		tr.Out(state.W, state.Req, ch)
-		wg.Done()
-	}()
+	})
 	wg.Wait()
 }


@@ -102,12 +102,10 @@ func TestShardEvictParallel(t *testing.T) {
 	start := make(chan struct{})
 	var wg sync.WaitGroup
 	for range shardSize {
-		wg.Add(1)
-		go func() {
+		wg.Go(func() {
 			<-start
 			s.Evict()
-			wg.Done()
-		}()
+		})
 	}
 	close(start) // start evicting in parallel
 	wg.Wait()
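This cache test also shows the start-gate idiom surviving the refactor: every goroutine blocks on the same channel, and `close(start)` releases them all at once so the evictions race against each other. A small self-contained sketch of that shape, using hypothetical names (`evictInParallel` is not in the CoreDNS tree):

```go
package main

import "sync"

// evictInParallel launches `workers` goroutines that all wait on one gate,
// then releases them simultaneously so the calls to evict() overlap.
func evictInParallel(workers int, evict func()) {
	start := make(chan struct{})
	var wg sync.WaitGroup
	for range workers {
		wg.Go(func() {
			<-start // block until every worker has been started
			evict()
		})
	}
	close(start) // release all workers at the same moment
	wg.Wait()    // wg.Go already handled the Add/Done bookkeeping
}

func main() {
	evictInParallel(8, func() {})
}
```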


@@ -124,30 +124,26 @@ func TestConcurrentAccess(t *testing.T) {
 	// Test concurrent Int() calls
 	for range numGoroutines {
-		wg.Add(1)
-		go func() {
-			defer wg.Done()
+		wg.Go(func() {
 			for range numOperations {
 				val := r.Int()
 				if val < 0 {
 					errors <- nil
 				}
 			}
-		}()
+		})
 	}
 	// Test concurrent Perm() calls
 	for range numGoroutines {
-		wg.Add(1)
-		go func() {
-			defer wg.Done()
+		wg.Go(func() {
 			for range numOperations {
 				perm := r.Perm(5)
 				if len(perm) != 5 {
 					errors <- nil
 				}
 			}
-		}()
+		})
 	}
 	wg.Wait()


@@ -64,8 +64,7 @@ func TestDoDupSuppress(t *testing.T) {
 	const n = 10
 	var wg sync.WaitGroup
 	for range n {
-		wg.Add(1)
-		go func() {
+		wg.Go(func() {
 			v, err := g.Do(1, fn)
 			if err != nil {
 				t.Errorf("Do error: %v", err)
@@ -73,8 +72,7 @@ func TestDoDupSuppress(t *testing.T) {
 			if v.(string) != "bar" {
 				t.Errorf("Got %q; want %q", v, "bar")
 			}
-			wg.Done()
-		}()
+		})
 	}
 	time.Sleep(100 * time.Millisecond) // let goroutines above block
 	c <- "bar"
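One aside on the test hunks: the closures passed to `wg.Go` keep using `t.Errorf` rather than `t.Fatalf`, because `Errorf` only marks the test as failed and is safe to call from helper goroutines, while `Fatalf`/`FailNow` must run on the goroutine that executes the test function. A hypothetical, self-contained sketch (none of these names appear in the commit):

```go
package sketch

import (
	"sync"
	"testing"
)

// doWork stands in for whatever the real test exercises.
func doWork() string { return "bar" }

func TestWorkersSketch(t *testing.T) {
	const n = 4
	results := make(chan string, n)
	var wg sync.WaitGroup
	for range n {
		wg.Go(func() {
			v := doWork()
			if v != "bar" {
				t.Errorf("got %q, want %q", v, "bar") // Errorf is safe from worker goroutines
			}
			results <- v
		})
	}
	wg.Wait()
	close(results)
	if len(results) != n {
		t.Fatalf("got %d results, want %d", len(results), n) // Fatalf stays on the test goroutine
	}
}
```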