Webux Lab - Blog
Webux Lab Logo

Webux Lab

By Studio Webux

Search

By Tommy Gingras

Last update 2026-02-27

K3s · Golang

Go App with OpenTelemetry on K3s

End-to-end example deploying a Go application with full OpenTelemetry instrumentation — traces, metrics, and logs — to K3s. The image is built and pushed to the private registry, and telemetry is shipped to Alloy via OTLP. Pyroscope scrapes continuous pprof profiles automatically.

This is a practical stress-test app with two endpoints: a normal handler with random latency, and a /stress endpoint that triggers a burst of work to generate visible traces in Grafana.

Prerequisites

  • K3s running (see K3s Setup article)
  • Private registry deployed (see K3s Private Registry article)
  • Observability stack deployed (see K3s Observability Stack article)

Application

main.go

The app initializes three OTLP exporters (traces → Tempo, metrics → Prometheus via Alloy, logs → Loki via Alloy) using environment variables for the endpoint. It also exposes pprof on :6060 for Pyroscope scraping.

package main

import (
	"context"
	"fmt"
	"log"
	"math/rand"
	"net/http"
	_ "net/http/pprof"
	"os"
	"time"

	"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc"
	"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc"
	"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc"
	otellog "go.opentelemetry.io/otel/log"
	otelloglobal "go.opentelemetry.io/otel/log/global"
	"go.opentelemetry.io/otel/metric"
	sdklog "go.opentelemetry.io/otel/sdk/log"
	sdkmetric "go.opentelemetry.io/otel/sdk/metric"
	"go.opentelemetry.io/otel/sdk/resource"
	sdktrace "go.opentelemetry.io/otel/sdk/trace"
	semconv "go.opentelemetry.io/otel/semconv/v1.21.0"
)

var (
	// tracer and meter start as no-op instances from the global providers;
	// initOtel replaces them with instruments from the configured SDK
	// providers once the exporters are up.
	tracer    = otel.Tracer("stress-app")
	meter     = otel.Meter("stress-app")
	appLogger otellog.Logger
	// counter and latency are created in initOtel and used by the HTTP
	// handlers to record request counts and durations.
	counter   metric.Int64Counter
	latency   metric.Float64Histogram
)

// initOtel configures the global OpenTelemetry trace, metric, and log
// providers with OTLP/gRPC exporters. The exporters read their target from
// OTEL_EXPORTER_OTLP_ENDPOINT (and related OTEL_* variables), so no explicit
// endpoint options are passed here.
//
// It returns a shutdown function that flushes and stops all three providers;
// call it before process exit. A non-nil error means one of the exporters or
// instruments could not be created.
func initOtel(ctx context.Context) (func(), error) {
	res := resource.NewWithAttributes(
		semconv.SchemaURL,
		semconv.ServiceName("stress-app"),
		semconv.ServiceVersion("1.0.0"),
		attribute.String("environment", "production"),
	)

	traceExp, err := otlptracegrpc.New(ctx)
	if err != nil {
		return nil, fmt.Errorf("trace exporter: %w", err)
	}
	tp := sdktrace.NewTracerProvider(
		sdktrace.WithBatcher(traceExp),
		sdktrace.WithResource(res),
		// AlwaysSample is intentional for this demo app; switch to a
		// ratio-based sampler for high-traffic production workloads.
		sdktrace.WithSampler(sdktrace.AlwaysSample()),
	)
	otel.SetTracerProvider(tp)
	tracer = tp.Tracer("stress-app")

	metricExp, err := otlpmetricgrpc.New(ctx)
	if err != nil {
		return nil, fmt.Errorf("metric exporter: %w", err)
	}
	mp := sdkmetric.NewMeterProvider(
		sdkmetric.WithReader(sdkmetric.NewPeriodicReader(metricExp)),
		sdkmetric.WithResource(res),
	)
	otel.SetMeterProvider(mp)
	meter = mp.Meter("stress-app")

	logExp, err := otlploggrpc.New(ctx)
	if err != nil {
		return nil, fmt.Errorf("log exporter: %w", err)
	}
	lp := sdklog.NewLoggerProvider(
		sdklog.WithProcessor(sdklog.NewBatchProcessor(logExp)),
		sdklog.WithResource(res),
	)
	otelloglobal.SetLoggerProvider(lp)
	appLogger = lp.Logger("stress-app")

	// Instrument creation can fail (e.g. invalid name/unit); surface it
	// instead of silently discarding the error.
	counter, err = meter.Int64Counter("http.requests.total",
		metric.WithDescription("Total HTTP requests"),
	)
	if err != nil {
		return nil, fmt.Errorf("requests counter: %w", err)
	}
	latency, err = meter.Float64Histogram("http.request.duration",
		metric.WithDescription("HTTP request duration in ms"),
		metric.WithUnit("ms"),
	)
	if err != nil {
		return nil, fmt.Errorf("latency histogram: %w", err)
	}

	return func() {
		// Use a fresh context with its own deadline: the caller's ctx may
		// already be cancelled by the time shutdown runs, which would abort
		// the final export flush.
		shCtx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
		defer cancel()
		if err := tp.Shutdown(shCtx); err != nil {
			log.Printf("trace provider shutdown: %v", err)
		}
		if err := mp.Shutdown(shCtx); err != nil {
			log.Printf("meter provider shutdown: %v", err)
		}
		if err := lp.Shutdown(shCtx); err != nil {
			log.Printf("logger provider shutdown: %v", err)
		}
	}, nil
}

// main wires up OpenTelemetry, exposes pprof on :6060 for Pyroscope
// scraping, and serves the application behind otelhttp middleware on :8080.
func main() {
	ctx := context.Background()

	shutdown, err := initOtel(ctx)
	if err != nil {
		log.Fatalf("failed to init otel: %v", err)
	}
	defer shutdown()

	// The blank net/http/pprof import registers its handlers on
	// http.DefaultServeMux; serve them on a dedicated port for Pyroscope.
	go func() {
		if err := http.ListenAndServe(":6060", nil); err != nil {
			log.Printf("pprof server: %v", err)
		}
	}()

	mux := http.NewServeMux()
	mux.HandleFunc("/", handleRequest)
	mux.HandleFunc("/stress", handleStress)
	mux.HandleFunc("/health", func(w http.ResponseWriter, r *http.Request) {
		w.WriteHeader(http.StatusOK)
		fmt.Fprint(w, "ok")
	})

	// otelhttp wraps every request in a server span and handles trace
	// context propagation.
	handler := otelhttp.NewHandler(mux, "stress-app")
	if err := http.ListenAndServe(":8080", handler); err != nil {
		// Printf rather than Fatalf so the deferred telemetry shutdown
		// still runs and flushes pending spans/metrics/logs.
		log.Printf("http server: %v", err)
	}
}

go.mod

module stress-app

go 1.25.0

require (
	go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.65.0
	go.opentelemetry.io/otel v1.40.0
	go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc v0.16.0
	go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.40.0
	go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.40.0
	go.opentelemetry.io/otel/log v0.16.0
	go.opentelemetry.io/otel/metric v1.40.0
	go.opentelemetry.io/otel/sdk v1.40.0
	go.opentelemetry.io/otel/sdk/log v0.16.0
	go.opentelemetry.io/otel/sdk/metric v1.40.0
)

Dockerfile

Multi-stage build — the final image contains only the static binary.

# Build stage: compile a static Linux binary. CGO is disabled so the result
# has no libc dependency and runs on a bare Alpine base.
FROM golang:1.25-alpine AS builder
WORKDIR /app
# Copy module files first so the dependency download layer is cached between
# builds as long as go.mod/go.sum do not change.
COPY go.mod go.sum ./
RUN go mod download
COPY . .
RUN CGO_ENABLED=0 GOOS=linux go build -o stress-app .

# Runtime stage: ship only the compiled binary.
FROM alpine:latest
WORKDIR /app
COPY --from=builder /app/stress-app .
# 8080 = application HTTP, 6060 = pprof endpoint scraped by Pyroscope.
EXPOSE 8080 6060
CMD ["./stress-app"]

Build and Push

docker build -t registry.webux.dev/stress-app:latest .
docker push registry.webux.dev/stress-app:latest

Deploy to K3s

k8s.yaml

---
# Single-replica Deployment pulling the app image from the private registry.
# No imagePullSecret is needed: k3s resolves registry auth via registries.yaml.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: stress-app
  namespace: webux-dev
spec:
  replicas: 1
  selector:
    matchLabels:
      app: stress-app
  template:
    metadata:
      labels:
        app: stress-app
      annotations:
        # Enable Pyroscope pprof scraping
        profiles.grafana.com/scrape: "true"
        profiles.grafana.com/port: "6060"
    spec:
      containers:
        - name: stress-app
          image: registry.webux.dev/stress-app:latest
          ports:
            - containerPort: 8080
              name: http
            - containerPort: 6060
              name: pprof
          env:
            # The OTel SDK picks this up automatically; 4317 is Alloy's
            # OTLP gRPC port.
            - name: OTEL_EXPORTER_OTLP_ENDPOINT
              value: "http://alloy.monitoring.svc.cluster.local:4317"
            - name: OTEL_SERVICE_NAME
              value: "stress-app"
            - name: OTEL_RESOURCE_ATTRIBUTES
              value: "deployment.environment=production,k8s.namespace.name=webux-dev"
          resources:
            requests:
              cpu: 50m
              memory: 64Mi
            limits:
              cpu: 500m
              memory: 128Mi
---
# ClusterIP Service fronting the app pods on port 80 -> container 8080.
apiVersion: v1
kind: Service
metadata:
  name: stress-app
  namespace: webux-dev
spec:
  selector:
    app: stress-app
  ports:
    - name: http
      port: 80
      targetPort: 8080
---
# cert-manager Certificate: issues the stress-tls secret for stress.webux.dev
# via the cloudflare-cluster-issuer ClusterIssuer.
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
  name: stress-cert
  namespace: webux-dev
spec:
  secretName: stress-tls
  issuerRef:
    name: cloudflare-cluster-issuer
    kind: ClusterIssuer
  dnsNames:
    - stress.webux.dev
---
# Traefik IngressRoute: terminates TLS with the stress-tls secret and routes
# HTTPS traffic for stress.webux.dev to the Service above.
apiVersion: traefik.io/v1alpha1
kind: IngressRoute
metadata:
  name: stress-app
  namespace: webux-dev
spec:
  entryPoints:
    - websecure
  routes:
    - match: Host(`stress.webux.dev`)
      kind: Rule
      services:
        - name: stress-app
          port: 80
  tls:
    secretName: stress-tls

Apply the manifests:

kubectl apply -f k8s.yaml

Testing

# Normal request
curl https://stress.webux.dev/

# Trigger a burst of 20 iterations to generate traces
curl https://stress.webux.dev/stress

What to look for in Grafana

  • Explore → Tempo: search for stress-app traces, drill into stress-loop spans
  • Explore → Loki: filter {service_name="stress-app"} to see structured logs
  • Dashboards → Node Exporter: CPU/memory on the node
  • Pyroscope: CPU and memory flamegraphs for the Go process

Key Points

  • OTEL_EXPORTER_OTLP_ENDPOINT is all that is needed — the SDK reads it automatically
  • No imagePullSecret is needed because k3s handles registry auth via registries.yaml
  • The pprof annotations on the pod tell Alloy's Pyroscope scraper where to find the profiling endpoint
  • AlwaysSample() is used here for demo purposes — use a ratio sampler in high-traffic production setups