
Go Goroutines Tutorial

Goroutines are the fundamental building blocks of concurrent programming in Go. They are lightweight threads managed by the Go runtime that enable you to write highly concurrent programs efficiently. This tutorial will guide you from basic goroutine usage to advanced patterns.

1. Goroutine Basics

1.1 Starting Goroutines

Goroutines are started by prefixing a function call with the go keyword. They run concurrently with the calling function.

func say(s string) {
    for i := 0; i < 3; i++ {
        time.Sleep(100 * time.Millisecond)
        fmt.Println(s)
    }
}

func main() {
    // Run in main goroutine
    say("direct call")
    
    // Run in new goroutine
    go say("goroutine")
    
    // Run anonymous function in goroutine
    go func(msg string) {
        fmt.Println(msg)
    }("going")
    
    // Crudely wait for the goroutines to finish; sleeping is fine for a
    // demo, but sync.WaitGroup (section 3.1) is the proper tool
    time.Sleep(time.Second)
    fmt.Println("done")
}

1.2 Goroutine Characteristics

Key properties of goroutines:

  • Lightweight: Start with small (~2 KB) stacks that grow and shrink as needed
  • Cheap: You can create thousands or even millions in a single program (see the sketch after this list)
  • Multiplexed: Many goroutines are scheduled onto a much smaller number of OS threads
  • Non-blocking: A goroutine blocked on a channel or I/O is parked by the runtime; its OS thread moves on to run other goroutines
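
To make "cheap" concrete, here is a small self-contained sketch (an addition to the tutorial, with illustrative names) that parks 100,000 goroutines on a channel and counts them with runtime.NumGoroutine. It uses sync.WaitGroup, which is covered in section 3.1.

package main

import (
    "fmt"
    "runtime"
    "sync"
)

func main() {
    const n = 100000
    release := make(chan struct{})
    var wg sync.WaitGroup

    for i := 0; i < n; i++ {
        wg.Add(1)
        go func() {
            defer wg.Done()
            <-release // park here; a parked goroutine costs little more than its small stack
        }()
    }

    // All n goroutines plus main are alive at this point.
    fmt.Println("goroutines alive:", runtime.NumGoroutine()) // 100001

    close(release) // let every goroutine finish
    wg.Wait()
}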

1.3 Goroutine Scheduling

Go uses an M:N scheduler that multiplexes goroutines onto OS threads:

// Demonstration of goroutine scheduling
func numbers() {
    for i := 1; i <= 5; i++ {
        time.Sleep(250 * time.Millisecond)
        fmt.Printf("%d ", i)
    }
}

func letters() {
    for i := 'a'; i <= 'e'; i++ {
        time.Sleep(400 * time.Millisecond)
        fmt.Printf("%c ", i)
    }
}

func main() {
    go numbers()
    go letters()
    time.Sleep(3 * time.Second)
    fmt.Println("\nmain terminated")
}
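
The number of OS threads that execute Go code simultaneously is capped by GOMAXPROCS, which defaults to the number of CPU cores. A short sketch (an addition, not part of the original example) for inspecting the relevant runtime values:

package main

import (
    "fmt"
    "runtime"
)

func main() {
    // GOMAXPROCS(0) queries the current setting without changing it.
    fmt.Println("GOMAXPROCS:", runtime.GOMAXPROCS(0))
    fmt.Println("CPU cores: ", runtime.NumCPU())
    fmt.Println("goroutines:", runtime.NumGoroutine())
}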

2. Goroutine Communication

2.1 Using Channels

Channels are the preferred way to communicate between goroutines.

func sum(s []int, c chan int) {
    sum := 0
    for _, v := range s {
        sum += v
    }
    c <- sum // send sum to c
}

func main() {
    s := []int{7, 2, 8, -9, 4, 0}
    
    c := make(chan int)
    go sum(s[:len(s)/2], c)
    go sum(s[len(s)/2:], c)
    
    x, y := <-c, <-c // receive from c
    
    fmt.Println(x, y, x+y) // e.g. -5 17 12 (x and y may arrive in either order; the total is always 12)
}

2.2 Buffered Channels

A send on a buffered channel blocks only when the buffer is full, so the sender does not need a receiver to be ready immediately.

func main() {
    // Channel with buffer size 2
    ch := make(chan int, 2)
    
    // Sends don't block until the buffer is full
    ch <- 1
    ch <- 2
    
    // Receives work as usual
    fmt.Println(<-ch) // 1
    fmt.Println(<-ch) // 2
}
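
len and cap work on channels too, which helps when reasoning about buffered sends. A short illustrative sketch of buffer occupancy and the point at which a send would block:

package main

import "fmt"

func main() {
    ch := make(chan int, 2)
    ch <- 1
    ch <- 2

    // Both buffer slots are now in use.
    fmt.Println(len(ch), cap(ch)) // 2 2

    // A third bare send (ch <- 3) would block here: the buffer is full
    // and no receiver is ready.

    <-ch
    fmt.Println(len(ch), cap(ch)) // 1 2
}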

2.3 Closing Channels

Senders can close a channel to signal that no more values will be sent; only the sender should close a channel, never the receiver.

func fibonacci(n int, c chan int) {
    x, y := 0, 1
    for i := 0; i < n; i++ {
        c <- x
        x, y = y, x+y
    }
    close(c)
}

func main() {
    c := make(chan int, 10)
    go fibonacci(cap(c), c)
    
    // Receive values until channel is closed
    for i := range c {
        fmt.Println(i)
    }
}
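
The range loop above exits automatically once the channel is closed and drained. A receiver can also detect closure explicitly with the two-value receive form, as in this minimal sketch:

package main

import "fmt"

func main() {
    c := make(chan int, 2)
    c <- 1
    c <- 2
    close(c)

    // Values already in the buffer are still delivered after close.
    v, ok := <-c
    fmt.Println(v, ok) // 1 true
    v, ok = <-c
    fmt.Println(v, ok) // 2 true

    // A drained, closed channel yields the zero value and ok == false.
    v, ok = <-c
    fmt.Println(v, ok) // 0 false
}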

3. Synchronization

3.1 WaitGroups

sync.WaitGroup waits for a collection of goroutines to finish.

func worker(id int, wg *sync.WaitGroup) {
    defer wg.Done() // Decrement counter when done
    
    fmt.Printf("Worker %d starting\n", id)
    time.Sleep(time.Second)
    fmt.Printf("Worker %d done\n", id)
}

func main() {
    var wg sync.WaitGroup
    
    for i := 1; i <= 5; i++ {
        wg.Add(1) // Increment counter
        go worker(i, &wg)
    }
    
    wg.Wait() // Block until counter is zero
}

3.2 Mutexes

Mutexes protect shared state from concurrent access.

type SafeCounter struct {
    mu    sync.Mutex
    value int
}

func (c *SafeCounter) Inc() {
    c.mu.Lock()
    defer c.mu.Unlock()
    c.value++
}

func (c *SafeCounter) Value() int {
    c.mu.Lock()
    defer c.mu.Unlock()
    return c.value
}
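
A short driver for the counter, added here for illustration (it assumes the SafeCounter type above plus the fmt and sync imports), showing that concurrent increments stay consistent:

func main() {
    var c SafeCounter
    var wg sync.WaitGroup

    for i := 0; i < 1000; i++ {
        wg.Add(1)
        go func() {
            defer wg.Done()
            c.Inc()
        }()
    }

    wg.Wait()
    fmt.Println(c.Value()) // Always 1000
}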

3.3 Once

sync.Once ensures initialization code runs exactly once.

type Singleton struct{}

var (
    once     sync.Once
    instance *Singleton
)

func getInstance() *Singleton {
    once.Do(func() {
        instance = &Singleton{}
    })
    return instance
}
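
A quick illustrative check (assuming the declarations above plus the fmt and sync imports) that concurrent callers all receive the same instance:

func main() {
    var wg sync.WaitGroup
    for i := 0; i < 10; i++ {
        wg.Add(1)
        go func() {
            defer wg.Done()
            // The function literal passed to once.Do runs exactly once,
            // so every call prints the same pointer.
            fmt.Printf("%p\n", getInstance())
        }()
    }
    wg.Wait()
}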

4. Goroutine Patterns

4.1 Worker Pools

A pool of goroutines processing work from a channel.

func worker(id int, jobs <-chan int, results chan<- int) {
    for j := range jobs {
        fmt.Printf("worker %d started job %d\n", id, j)
        time.Sleep(time.Second)
        fmt.Printf("worker %d finished job %d\n", id, j)
        results <- j * 2
    }
}

func main() {
    jobs := make(chan int, 100)
    results := make(chan int, 100)
    
    // Start 3 workers
    for w := 1; w <= 3; w++ {
        go worker(w, jobs, results)
    }
    
    // Send 5 jobs
    for j := 1; j <= 5; j++ {
        jobs <- j
    }
    close(jobs)
    
    // Collect results
    for a := 1; a <= 5; a++ {
        <-results
    }
}
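
The main function above knows it sent exactly five jobs, so it simply counts five results. When the result count isn't known in advance, a common refinement (a sketch, not part of the original example) is to close the results channel once every worker has returned, so it can be ranged over:

func main() {
    jobs := make(chan int, 100)
    results := make(chan int, 100)

    var wg sync.WaitGroup
    for w := 1; w <= 3; w++ {
        wg.Add(1)
        go func(id int) {
            defer wg.Done()
            worker(id, jobs, results)
        }(w)
    }

    for j := 1; j <= 5; j++ {
        jobs <- j
    }
    close(jobs)

    // Close results only after every worker has finished.
    go func() {
        wg.Wait()
        close(results)
    }()

    for r := range results {
        fmt.Println("result:", r)
    }
}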

4.2 Fan-out/Fan-in

Distribute work to multiple goroutines and combine results.

func producer(nums ...int) <-chan int {
    out := make(chan int)
    go func() {
        for _, n := range nums {
            out <- n
        }
        close(out)
    }()
    return out
}

func square(in <-chan int) <-chan int {
    out := make(chan int)
    go func() {
        for n := range in {
            out <- n * n
        }
        close(out)
    }()
    return out
}

func merge(cs ...<-chan int) <-chan int {
    var wg sync.WaitGroup
    out := make(chan int)
    
    output := func(c <-chan int) {
        for n := range c {
            out <- n
        }
        wg.Done()
    }
    
    wg.Add(len(cs))
    for _, c := range cs {
        go output(c)
    }
    
    go func() {
        wg.Wait()
        close(out)
    }()
    return out
}

func main() {
    in := producer(1, 2, 3, 4)
    
    // Fan-out
    c1 := square(in)
    c2 := square(in)
    
    // Fan-in
    for n := range merge(c1, c2) {
        fmt.Println(n) // 1, 4, 9, 16 in nondeterministic order
    }
}

4.3 Pipeline

Chain goroutines together in a processing pipeline.

func gen(nums ...int) <-chan int {
    out := make(chan int)
    go func() {
        for _, n := range nums {
            out <- n
        }
        close(out)
    }()
    return out
}

func sq(in <-chan int) <-chan int {
    out := make(chan int)
    go func() {
        for n := range in {
            out <- n * n
        }
        close(out)
    }()
    return out
}

func main() {
    // Set up the pipeline
    c := gen(2, 3)
    out := sq(c)
    
    // Consume the output
    fmt.Println(<-out) // 4
    fmt.Println(<-out) // 9
}
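
Because each stage accepts and returns a receive-only channel, stages compose directly. An illustrative variation on the code above chains two squaring stages:

func main() {
    // 2 -> 16, 3 -> 81, 4 -> 256
    for n := range sq(sq(gen(2, 3, 4))) {
        fmt.Println(n) // 16, 81, 256
    }
}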

5. Advanced Topics

5.1 Context

The context package carries deadlines and cancellation signals across API boundaries and between goroutines.

func worker(ctx context.Context, wg *sync.WaitGroup) {
    defer wg.Done()
    
    for {
        select {
        case <-ctx.Done():
            fmt.Println("Worker cancelled")
            return
        default:
            // Do work
            fmt.Println("Working...")
            time.Sleep(500 * time.Millisecond)
        }
    }
}

func main() {
    ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
    defer cancel()
    
    var wg sync.WaitGroup
    wg.Add(1)
    go worker(ctx, &wg)
    
    wg.Wait()
}
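
context.WithCancel works the same way but leaves the timing of cancellation to the caller. A sketch reusing the worker function above (and the same imports):

func main() {
    ctx, cancel := context.WithCancel(context.Background())

    var wg sync.WaitGroup
    wg.Add(1)
    go worker(ctx, &wg)

    // Let the worker run briefly, then cancel it explicitly.
    time.Sleep(1200 * time.Millisecond)
    cancel()

    wg.Wait()
}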

5.2 Select Statement

select lets a goroutine wait on multiple channel operations.

func main() {
    c1 := make(chan string)
    c2 := make(chan string)
    
    go func() {
        time.Sleep(1 * time.Second)
        c1 <- "one"
    }()
    
    go func() {
        time.Sleep(2 * time.Second)
        c2 <- "two"
    }()
    
    for i := 0; i < 2; i++ {
        select {
        case msg1 := <-c1:
            fmt.Println("received", msg1)
        case msg2 := <-c2:
            fmt.Println("received", msg2)
        }
    }
}
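
Two common variations, shown here as an illustrative addition: a default case makes select non-blocking, and time.After imposes a timeout.

package main

import (
    "fmt"
    "time"
)

func main() {
    ch := make(chan string)

    // Non-blocking receive: default runs when no other case is ready.
    select {
    case msg := <-ch:
        fmt.Println("received", msg)
    default:
        fmt.Println("no message available")
    }

    // Timeout: give up if nothing arrives within 500ms.
    select {
    case msg := <-ch:
        fmt.Println("received", msg)
    case <-time.After(500 * time.Millisecond):
        fmt.Println("timed out")
    }
}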

5.3 Atomic Operations

sync/atomic provides low-level atomic memory operations.

type AtomicCounter struct {
    count int64
}

func (c *AtomicCounter) Inc() {
    atomic.AddInt64(&c.count, 1)
}

func (c *AtomicCounter) Value() int64 {
    return atomic.LoadInt64(&c.count)
}
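
Since Go 1.19, sync/atomic also provides typed values such as atomic.Int64, which remove the need to pass raw pointers. An equivalent counter, written as a sketch:

type TypedCounter struct {
    count atomic.Int64
}

func (c *TypedCounter) Inc() {
    c.count.Add(1) // atomic increment, no explicit pointer needed
}

func (c *TypedCounter) Value() int64 {
    return c.count.Load()
}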

6. Common Pitfalls

6.1 Goroutine Leaks

A goroutine leaks when it blocks forever, for example on a channel that nothing will ever send to; its stack and the values it references are never reclaimed.

// Leaking goroutine example
func leak() {
    ch := make(chan int)
    go func() {
        val := <-ch // Blocked forever
        fmt.Println(val)
    }()
    return // nothing is ever sent on ch, so the goroutine above blocks forever
}

// Fixed version
func noLeak() {
    ch := make(chan int)
    done := make(chan bool)
    
    go func() {
        val := <-ch
        fmt.Println(val)
        done <- true
    }()
    
    ch <- 42 // Send value
    <-done   // Wait for completion
}

6.2 Race Conditions

Data races occur when two goroutines access the same data concurrently without synchronization and at least one of the accesses is a write.

// Race condition example
var counter int

func increment() {
    counter++
}

func main() {
    for i := 0; i < 1000; i++ {
        go increment()
    }
    time.Sleep(1 * time.Second)
    fmt.Println(counter) // May not be 1000
}

// Fixed with mutex
var (
    counter int
    mu      sync.Mutex
)

func safeIncrement() {
    mu.Lock()
    defer mu.Unlock()
    counter++
}
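
A driver for the fixed version (an addition; it assumes the declarations above plus the fmt and sync imports) that waits with a WaitGroup instead of sleeping, so the final count is deterministic. Running both versions with go run -race shows a report for the first and none for this one.

func main() {
    var wg sync.WaitGroup
    for i := 0; i < 1000; i++ {
        wg.Add(1)
        go func() {
            defer wg.Done()
            safeIncrement()
        }()
    }
    wg.Wait()
    fmt.Println(counter) // Always 1000
}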

6.3 Deadlocks

Deadlocks occur when goroutines wait for each other indefinitely.

// Deadlock example
func main() {
    ch := make(chan int)
    ch <- 42    // Blocks forever: unbuffered send with no receiver ready
    val := <-ch // Never reached; the runtime aborts with "all goroutines are asleep - deadlock!"
    fmt.Println(val)
}

// Fixed by using a buffered channel (or by moving the send/receive into a separate goroutine)
func noDeadlock() {
    ch := make(chan int, 1) // Buffered channel
    ch <- 42
    val := <-ch
    fmt.Println(val)
}

7. Best Practices

7.1 Goroutine Design Principles

Key principles for effective goroutine usage:

  • Keep goroutines focused: Each should do one thing well
  • Use channels for communication: Don't share memory directly
  • Be explicit about ownership: Clearly define which goroutine owns which data (see the ownership sketch after this list)
  • Manage lifetimes: Know when each goroutine starts and, just as importantly, how it stops
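
The ownership sketch referred to above: one goroutine owns the map and is the only code that touches it, while every other goroutine interacts with it through channels. Names and structure are illustrative, not prescribed by the tutorial.

package main

import "fmt"

// lookup is a request for the current count of a key.
type lookup struct {
    key   string
    reply chan int
}

// owner is the only goroutine that reads or writes counts, so no mutex is
// needed. (Real code would also give it a way to shut down, e.g. a done
// channel or a context.)
func owner(requests <-chan lookup, updates <-chan string) {
    counts := make(map[string]int)
    for {
        select {
        case key := <-updates:
            counts[key]++
        case req := <-requests:
            req.reply <- counts[req.key]
        }
    }
}

func main() {
    requests := make(chan lookup)
    updates := make(chan string)
    go owner(requests, updates)

    updates <- "hits"
    updates <- "hits"

    reply := make(chan int)
    requests <- lookup{key: "hits", reply: reply}
    fmt.Println(<-reply) // 2
}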

7.2 Error Handling

Proper error handling in concurrent programs:

func worker(id int, jobs <-chan int, results chan<- int, errChan chan<- error) {
    for j := range jobs {
        if j < 0 {
            errChan <- fmt.Errorf("invalid job %d", j)
            continue
        }
        results <- j * 2
    }
}

func main() {
    jobs := make(chan int, 10)
    results := make(chan int, 10)
    errChan := make(chan error, 10)
    
    go worker(1, jobs, results, errChan)
    
    jobs <- 2
    jobs <- -1 // Will cause error
    close(jobs)
    
    // Two jobs were sent, so collect two outcomes (here: one result, one error)
    for i := 0; i < 2; i++ {
        select {
        case err := <-errChan:
            fmt.Println("Error:", err)
        case res := <-results:
            fmt.Println("Result:", res)
        }
    }
}
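
For the common case of "run several tasks and fail on the first error", the golang.org/x/sync/errgroup package wraps up this pattern. A brief sketch of typical usage (an alternative approach, not part of the original example):

package main

import (
    "fmt"

    "golang.org/x/sync/errgroup"
)

func main() {
    var g errgroup.Group

    for _, j := range []int{2, 4, -1} {
        j := j // capture the loop variable (unnecessary as of Go 1.22)
        g.Go(func() error {
            if j < 0 {
                return fmt.Errorf("invalid job %d", j)
            }
            fmt.Println("result:", j*2)
            return nil
        })
    }

    // Wait blocks until all tasks return and yields the first non-nil error.
    if err := g.Wait(); err != nil {
        fmt.Println("Error:", err)
    }
}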

7.3 Debugging Techniques

Tools and techniques for debugging goroutines:

  • go run -race / go test -race: Detects data races at runtime
  • runtime.NumGoroutine(): Reports the number of active goroutines (see the sketch after this list)
  • Logging with explicit worker or request identifiers (Go does not expose goroutine IDs)
  • net/http/pprof and runtime/pprof: Goroutine dumps and performance profiles
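
The sketch referenced above: counting goroutines at runtime and exposing pprof's goroutine dump over HTTP. The port and paths are conventional choices, not requirements.

package main

import (
    "fmt"
    "net/http"
    _ "net/http/pprof" // registers the /debug/pprof/* handlers
    "runtime"
    "time"
)

func main() {
    // Serve profiling endpoints on localhost:6060.
    go func() {
        _ = http.ListenAndServe("localhost:6060", nil)
    }()

    // A few deliberately parked goroutines to make the dump interesting.
    for i := 0; i < 10; i++ {
        go func() { select {} }()
    }

    fmt.Println("active goroutines:", runtime.NumGoroutine())
    fmt.Println("stack dump: http://localhost:6060/debug/pprof/goroutine?debug=2")

    // Keep the process alive long enough to inspect the endpoint.
    time.Sleep(time.Minute)
}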
top-home