2 Commits
v0.8.2 ... main

16 changed files with 1686 additions and 68 deletions

View File

@@ -49,7 +49,7 @@ structure end-to-end.
`feedkit` currently includes:
- strict YAML config loading and validation
- polling and streaming source abstractions
- scheduler orchestration for configured sources
- scheduler orchestration for configured sources and supervised stream workers
- optional pipeline processors
- built-in dedupe and normalization processors
- route compilation and sink fanout

3
doc.go
View File

@@ -23,7 +23,8 @@
// reusable source helpers.
//
// - scheduler
// Runs configured sources on a cadence or as long-lived stream workers.
// Runs configured sources on a cadence and supervises long-lived stream
// workers with restart/fatal handling.
//
// - processors
// Defines the generic processor interface and registry used to build

View File

@@ -0,0 +1,67 @@
package postgres
import (
"context"
"database/sql"
"fmt"
"net/url"
"strings"
"time"
_ "github.com/lib/pq"
)
// initTimeout bounds the connectivity ping performed by Open.
const initTimeout = 5 * time.Second

// Test seams: production code always uses sql.Open and DB.PingContext, but
// the package tests swap these to simulate open/ping failures.
var (
	sqlOpen = sql.Open
	pingDB  = func(ctx context.Context, db *sql.DB) error { return db.PingContext(ctx) }
)
// ConnConfig describes the minimal connection settings shared by feedkit's
// Postgres readers and writers.
type ConnConfig struct {
URI string
Username string
Password string
}
// BuildDSN validates a Postgres URI and injects credentials into it.
func BuildDSN(cfg ConnConfig) (string, error) {
u, err := url.Parse(strings.TrimSpace(cfg.URI))
if err != nil {
return "", fmt.Errorf("invalid uri: %w", err)
}
if u.Scheme == "" {
return "", fmt.Errorf("invalid uri: missing scheme")
}
if u.Host == "" {
return "", fmt.Errorf("invalid uri: missing host")
}
u.User = url.UserPassword(cfg.Username, cfg.Password)
return u.String(), nil
}
// Open builds a DSN, opens a database handle, and verifies connectivity with a
// bounded ping before returning the handle.
func Open(ctx context.Context, cfg ConnConfig) (*sql.DB, error) {
	dsn, dsnErr := BuildDSN(cfg)
	if dsnErr != nil {
		return nil, dsnErr
	}
	handle, openErr := sqlOpen("postgres", dsn)
	if openErr != nil {
		return nil, openErr
	}
	pingCtx, cancel := context.WithTimeout(ctx, initTimeout)
	defer cancel()
	if pingErr := pingDB(pingCtx, handle); pingErr != nil {
		// A failed ping must not leak the pool; close before reporting.
		_ = handle.Close()
		return nil, pingErr
	}
	return handle, nil
}

View File

@@ -0,0 +1,165 @@
package postgres
import (
"context"
"database/sql"
"database/sql/driver"
"errors"
"io"
"net/url"
"strings"
"sync"
"testing"
)
// withPostgresPackageTestState snapshots the package-level sqlOpen/pingDB
// seams and restores them in Cleanup, so tests that stub them cannot leak
// state into later tests.
func withPostgresPackageTestState(t *testing.T) {
	t.Helper()
	oldSQLOpen := sqlOpen
	oldPingDB := pingDB
	t.Cleanup(func() {
		sqlOpen = oldSQLOpen
		pingDB = oldPingDB
	})
}
// TestBuildDSNInjectsCredentials checks that BuildDSN trims whitespace around
// the URI and writes the configured username/password into the userinfo part.
func TestBuildDSNInjectsCredentials(t *testing.T) {
	dsn, err := BuildDSN(ConnConfig{
		URI:      " postgres://db.example.local:5432/feedkit?sslmode=disable ",
		Username: "app_user",
		Password: "app_pass",
	})
	if err != nil {
		t.Fatalf("BuildDSN() error = %v", err)
	}
	u, err := url.Parse(dsn)
	if err != nil {
		t.Fatalf("url.Parse() error = %v", err)
	}
	if u.User == nil || u.User.Username() != "app_user" {
		t.Fatalf("username = %q, want app_user", u.User.Username())
	}
	pass, ok := u.User.Password()
	if !ok || pass != "app_pass" {
		t.Fatalf("password = %q, want app_pass", pass)
	}
}
// TestBuildDSNRejectsInvalidURI checks that an unparseable URI yields an
// "invalid uri" error.
func TestBuildDSNRejectsInvalidURI(t *testing.T) {
	_, err := BuildDSN(ConnConfig{URI: "http://[::1", Username: "u", Password: "p"})
	if err == nil {
		t.Fatalf("BuildDSN() error = nil, want error")
	}
	if !strings.Contains(err.Error(), "invalid uri") {
		t.Fatalf("BuildDSN() error = %q", err)
	}
}
// TestBuildDSNRejectsMissingScheme checks that a scheme-relative URI is
// rejected with a "missing scheme" error.
func TestBuildDSNRejectsMissingScheme(t *testing.T) {
	_, err := BuildDSN(ConnConfig{URI: "//db.example.local/feedkit", Username: "u", Password: "p"})
	if err == nil {
		t.Fatalf("BuildDSN() error = nil, want error")
	}
	if !strings.Contains(err.Error(), "missing scheme") {
		t.Fatalf("BuildDSN() error = %q", err)
	}
}
// TestBuildDSNRejectsMissingHost checks that a host-less URI is rejected with
// a "missing host" error.
func TestBuildDSNRejectsMissingHost(t *testing.T) {
	_, err := BuildDSN(ConnConfig{URI: "postgres:///feedkit", Username: "u", Password: "p"})
	if err == nil {
		t.Fatalf("BuildDSN() error = nil, want error")
	}
	if !strings.Contains(err.Error(), "missing host") {
		t.Fatalf("BuildDSN() error = %q", err)
	}
}
// TestOpenPropagatesOpenFailure stubs the sqlOpen seam to fail and checks
// that Open surfaces that error unchanged.
func TestOpenPropagatesOpenFailure(t *testing.T) {
	withPostgresPackageTestState(t)
	sqlOpen = func(_, _ string) (*sql.DB, error) {
		return nil, errors.New("open failed")
	}
	_, err := Open(context.Background(), ConnConfig{
		URI:      "postgres://db.example.local/feedkit",
		Username: "u",
		Password: "p",
	})
	if err == nil {
		t.Fatalf("Open() error = nil, want error")
	}
	if !strings.Contains(err.Error(), "open failed") {
		t.Fatalf("Open() error = %q", err)
	}
}
// TestOpenPropagatesPingFailure registers a real driver whose connections
// fail Ping, then checks Open closes the handle and reports the ping error.
func TestOpenPropagatesPingFailure(t *testing.T) {
	withPostgresPackageTestState(t)
	const driverName = "feedkit_internal_postgres_ping_fail"
	registerPingTestDriver(driverName, errors.New("ping failed"))
	sqlOpen = func(_, _ string) (*sql.DB, error) {
		return sql.Open(driverName, "")
	}
	_, err := Open(context.Background(), ConnConfig{
		URI:      "postgres://db.example.local/feedkit",
		Username: "u",
		Password: "p",
	})
	if err == nil {
		t.Fatalf("Open() error = nil, want error")
	}
	if !strings.Contains(err.Error(), "ping failed") {
		t.Fatalf("Open() error = %q", err)
	}
}
// pingDriverMu/pingDriverSeen guard one-time driver registration:
// sql.Register panics on duplicate names, so each test driver name must be
// registered at most once per process.
var (
	pingDriverMu   sync.Mutex
	pingDriverSeen = map[string]bool{}
)

// registerPingTestDriver registers a driver whose connections fail Ping with
// pingErr. Repeated calls with the same name are no-ops.
func registerPingTestDriver(name string, pingErr error) {
	pingDriverMu.Lock()
	defer pingDriverMu.Unlock()
	if pingDriverSeen[name] {
		return
	}
	sql.Register(name, &pingTestDriver{pingErr: pingErr})
	pingDriverSeen[name] = true
}
// pingTestDriver opens connections that report a fixed Ping error.
type pingTestDriver struct {
	pingErr error
}

func (d *pingTestDriver) Open(string) (driver.Conn, error) {
	return &pingTestConn{pingErr: d.pingErr}, nil
}

// pingTestConn implements just enough of driver.Conn (plus the Pinger and
// QueryerContext extensions) for database/sql to exercise PingContext.
type pingTestConn struct {
	pingErr error
}

func (c *pingTestConn) Prepare(string) (driver.Stmt, error) {
	return nil, errors.New("not implemented")
}
func (c *pingTestConn) Close() error              { return nil }
func (c *pingTestConn) Begin() (driver.Tx, error) { return nil, errors.New("not implemented") }

// Ping returns the configured error; a nil pingErr would mean success.
func (c *pingTestConn) Ping(context.Context) error { return c.pingErr }

func (c *pingTestConn) QueryContext(context.Context, string, []driver.NamedValue) (driver.Rows, error) {
	return &pingTestRows{}, nil
}
// pingTestRows is an empty single-column result set.
type pingTestRows struct{}

func (r *pingTestRows) Columns() []string         { return []string{"ok"} }
func (r *pingTestRows) Close() error              { return nil }
func (r *pingTestRows) Next([]driver.Value) error { return io.EOF }

25
scheduler/doc.go Normal file
View File

@@ -0,0 +1,25 @@
// Package scheduler runs feedkit sources and forwards their events to the
// daemon event bus.
//
// External API surface:
// - Scheduler: runs configured polling and streaming jobs
// - Job: one scheduler task bound to a source
// - StreamExitPolicy: stream supervision policy for non-fatal exits
// - StreamBackoff: restart pacing for supervised stream sources
//
// Optional helpers from helpers.go:
// - JobFromSourceConfig: build a scheduler job from a configured source and
// feedkit-owned scheduling params
//
// Poll sources are run on a fixed cadence with optional jitter. Stream sources
// are supervised long-lived workers. Their generic feedkit controls live under
// sources[].params:
// - stream_exit_policy: restart|stop|fatal (default restart)
// - stream_backoff_initial: positive duration (default 1s)
// - stream_backoff_max: positive duration (default 1m)
// - stream_backoff_jitter: non-negative duration (default 250ms)
//
// Stream sources can classify exits with sources.StreamRetryable and
// sources.StreamFatal. Plain errors are treated as retryable by default, while
// fatal exits are propagated from Scheduler.Run so the daemon can shut down.
package scheduler

138
scheduler/helpers.go Normal file
View File

@@ -0,0 +1,138 @@
package scheduler
import (
"fmt"
"strings"
"time"
"gitea.maximumdirect.net/ejr/feedkit/config"
"gitea.maximumdirect.net/ejr/feedkit/sources"
)
// JobFromSourceConfig builds a scheduler Job from a configured source and its
// generic feedkit config.
//
// Stream sources must omit sources[].every and may carry stream_* params;
// poll sources require every > 0 and must not carry stream-only params.
// A source implementing both interfaces is treated as a stream source (the
// stream check runs first). Sources implementing neither are rejected.
func JobFromSourceConfig(src sources.Input, cfg config.SourceConfig) (Job, error) {
	if src == nil {
		return Job{}, fmt.Errorf("scheduler: source %q is nil", cfg.Name)
	}
	job := Job{
		Source: src,
		Every:  cfg.Every.Duration,
	}
	// Stream sources: no cadence; parse the optional supervision params.
	if _, ok := src.(sources.StreamSource); ok {
		if cfg.Every.Duration > 0 {
			return Job{}, fmt.Errorf("source %q: sources[].every must be omitted for stream sources", cfg.Name)
		}
		policy, err := parseStreamExitPolicy(cfg)
		if err != nil {
			return Job{}, err
		}
		backoff, err := parseStreamBackoff(cfg)
		if err != nil {
			return Job{}, err
		}
		job.StreamExitPolicy = policy
		job.StreamBackoff = backoff
		return job, nil
	}
	// Poll sources: cadence required; stream-only params rejected.
	if _, ok := src.(sources.PollSource); ok {
		if cfg.Every.Duration <= 0 {
			return Job{}, fmt.Errorf("source %q: sources[].every must be > 0 for polling sources", cfg.Name)
		}
		if err := rejectStreamParams(cfg); err != nil {
			return Job{}, err
		}
		return job, nil
	}
	return Job{}, fmt.Errorf("scheduler: source %q implements neither PollSource nor StreamSource", cfg.Name)
}
// parseStreamExitPolicy reads params.stream_exit_policy and maps it onto a
// StreamExitPolicy. An absent or nil param defaults to restart; anything that
// is not a (case-insensitive, whitespace-tolerant) restart/stop/fatal string
// is rejected.
func parseStreamExitPolicy(cfg config.SourceConfig) (StreamExitPolicy, error) {
	const key = "stream_exit_policy"
	raw, exists := cfg.Params[key]
	if !exists || raw == nil {
		return StreamExitPolicyRestart, nil
	}
	invalid := fmt.Errorf("source %q: params.%s must be one of: restart, stop, fatal", cfg.Name, key)
	s, ok := raw.(string)
	if !ok {
		return "", invalid
	}
	policy := StreamExitPolicy(strings.ToLower(strings.TrimSpace(s)))
	switch policy {
	case StreamExitPolicyRestart, StreamExitPolicyStop, StreamExitPolicyFatal:
		return policy, nil
	}
	return "", invalid
}
// parseStreamBackoff assembles restart pacing from params, applying package
// defaults for absent keys and validating that the cap is not below the floor.
func parseStreamBackoff(cfg config.SourceConfig) (StreamBackoff, error) {
	var (
		out StreamBackoff
		err error
	)
	if out.Initial, err = parsePositiveOrDefaultDuration(cfg, "stream_backoff_initial", defaultStreamBackoffInitial); err != nil {
		return StreamBackoff{}, err
	}
	if out.Max, err = parsePositiveOrDefaultDuration(cfg, "stream_backoff_max", defaultStreamBackoffMax); err != nil {
		return StreamBackoff{}, err
	}
	if out.Jitter, err = parseNonNegativeOrDefaultDuration(cfg, "stream_backoff_jitter", defaultStreamBackoffJitter); err != nil {
		return StreamBackoff{}, err
	}
	if out.Max < out.Initial {
		return StreamBackoff{}, fmt.Errorf("source %q: params.stream_backoff_max must be >= params.stream_backoff_initial", cfg.Name)
	}
	return out, nil
}
// rejectStreamParams errors when a poll source carries any stream-only param,
// so misplaced supervision settings fail loudly instead of being ignored.
func rejectStreamParams(cfg config.SourceConfig) error {
	for _, key := range [...]string{
		"stream_exit_policy",
		"stream_backoff_initial",
		"stream_backoff_max",
		"stream_backoff_jitter",
	} {
		if _, present := cfg.Params[key]; present {
			return fmt.Errorf("source %q: params.%s is only valid for stream sources", cfg.Name, key)
		}
	}
	return nil
}
// parsePositiveOrDefaultDuration returns def when key is absent; otherwise
// the param must parse as a strictly positive duration.
func parsePositiveOrDefaultDuration(cfg config.SourceConfig, key string, def time.Duration) (time.Duration, error) {
	if _, present := cfg.Params[key]; !present {
		return def, nil
	}
	if d, ok := cfg.ParamDuration(key); ok && d > 0 {
		return d, nil
	}
	return 0, fmt.Errorf("source %q: params.%s must be a positive duration", cfg.Name, key)
}
// parseNonNegativeOrDefaultDuration returns def when key is absent; otherwise
// the param must parse as a duration >= 0 (zero is allowed, unlike the
// positive variant).
func parseNonNegativeOrDefaultDuration(cfg config.SourceConfig, key string, def time.Duration) (time.Duration, error) {
	if _, present := cfg.Params[key]; !present {
		return def, nil
	}
	if d, ok := cfg.ParamDuration(key); ok && d >= 0 {
		return d, nil
	}
	return 0, fmt.Errorf("source %q: params.%s must be a non-negative duration", cfg.Name, key)
}

View File

@@ -5,6 +5,7 @@ import (
"fmt"
"hash/fnv"
"math/rand"
"sync"
"time"
"gitea.maximumdirect.net/ejr/feedkit/event"
@@ -28,8 +29,10 @@ type Logger = logging.Logf
// - For stream sources: Jitter is applied once at startup only (optional; useful to avoid
// reconnect storms when many instances start together).
type Job struct {
Source sources.Input
Every time.Duration
Source sources.Input
Every time.Duration
StreamExitPolicy StreamExitPolicy
StreamBackoff StreamBackoff
// Jitter is the maximum additional delay added before each poll.
// Example: if Every=15m and Jitter=30s, each poll will occur at:
@@ -41,12 +44,37 @@ type Job struct {
Jitter time.Duration
}
// StreamExitPolicy controls how the scheduler handles non-fatal stream exits.
type StreamExitPolicy string
const (
StreamExitPolicyRestart StreamExitPolicy = "restart"
StreamExitPolicyStop StreamExitPolicy = "stop"
StreamExitPolicyFatal StreamExitPolicy = "fatal"
)
// StreamBackoff controls restart pacing for stream supervision.
type StreamBackoff struct {
Initial time.Duration
Max time.Duration
Jitter time.Duration
}
type Scheduler struct {
Jobs []Job
Out chan<- event.Event
Logf Logger
}
const (
defaultStreamBackoffInitial = 1 * time.Second
defaultStreamBackoffMax = 1 * time.Minute
defaultStreamBackoffJitter = 250 * time.Millisecond
streamBackoffResetAfter = 5 * time.Minute
)
var timeNow = time.Now
// Run starts one goroutine per job.
// Poll jobs run on their own interval and emit 0..N events per poll.
// Stream jobs run continuously and emit events as they arrive.
@@ -58,16 +86,38 @@ func (s *Scheduler) Run(ctx context.Context) error {
return fmt.Errorf("scheduler.Run: no jobs configured")
}
runCtx, cancel := context.WithCancel(ctx)
defer cancel()
fatalErrCh := make(chan error, 1)
var wg sync.WaitGroup
for _, job := range s.Jobs {
job := job // capture loop variable
go s.runJob(ctx, job)
wg.Add(1)
go func() {
defer wg.Done()
s.runJob(runCtx, job, fatalErrCh)
}()
}
<-ctx.Done()
return ctx.Err()
done := make(chan struct{})
go func() {
wg.Wait()
close(done)
}()
select {
case err := <-fatalErrCh:
cancel()
<-done
return err
case <-runCtx.Done():
<-done
return runCtx.Err()
}
}
func (s *Scheduler) runJob(ctx context.Context, job Job) {
func (s *Scheduler) runJob(ctx context.Context, job Job, fatalErrCh chan<- error) {
if job.Source == nil {
s.logf("scheduler: job has nil source")
return
@@ -75,7 +125,7 @@ func (s *Scheduler) runJob(ctx context.Context, job Job) {
// Stream sources: event-driven.
if ss, ok := job.Source.(sources.StreamSource); ok {
s.runStream(ctx, job, ss)
s.runStream(ctx, job, ss, fatalErrCh)
return
}
@@ -93,18 +143,51 @@ func (s *Scheduler) runJob(ctx context.Context, job Job) {
s.runPoller(ctx, job, ps)
}
func (s *Scheduler) runStream(ctx context.Context, job Job, src sources.StreamSource) {
func (s *Scheduler) runStream(ctx context.Context, job Job, src sources.StreamSource, fatalErrCh chan<- error) {
policy := effectiveStreamExitPolicy(job.StreamExitPolicy)
backoff := effectiveStreamBackoff(job.StreamBackoff)
rng := seededRNG(src.Name())
// Optional startup jitter: helps avoid reconnect storms if many daemons start at once.
if job.Jitter > 0 {
rng := seededRNG(src.Name())
if !sleepJitter(ctx, rng, job.Jitter) {
return
}
}
// Stream sources should block until ctx cancel or fatal error.
if err := src.Run(ctx, s.Out); err != nil && ctx.Err() == nil {
s.logf("scheduler: stream source %q exited with error: %v", src.Name(), err)
nextDelay := backoff.Initial
for {
startedAt := timeNow()
err := src.Run(ctx, s.Out)
if ctx.Err() != nil {
return
}
normalizedErr := normalizeStreamExitError(src.Name(), err)
if sources.IsStreamFatal(normalizedErr) {
s.reportFatal(fatalErrCh, fmt.Errorf("scheduler: stream source %q exited fatally: %w", src.Name(), normalizedErr))
return
}
switch policy {
case StreamExitPolicyStop:
s.logf("scheduler: stream source %q stopped after exit: %v", src.Name(), normalizedErr)
return
case StreamExitPolicyFatal:
s.reportFatal(fatalErrCh, fmt.Errorf("scheduler: stream source %q exited under fatal policy: %w", src.Name(), normalizedErr))
return
}
if streamRunWasStable(startedAt, timeNow()) {
nextDelay = backoff.Initial
}
delay := nextDelay + randomDuration(rng, backoff.Jitter)
s.logf("scheduler: stream source %q exited; restarting in %s: %v", src.Name(), delay, normalizedErr)
if !sleepDuration(ctx, delay) {
return
}
nextDelay = nextStreamBackoff(nextDelay, backoff.Max)
}
}
@@ -164,10 +247,77 @@ func (s *Scheduler) logf(format string, args ...any) {
s.Logf(format, args...)
}
// reportFatal delivers err to the scheduler's fatal channel without blocking.
// The channel is created with capacity 1 in Run; only the first fatal error
// is kept and later ones are dropped, which is fine because Run returns on
// the first one it receives.
func (s *Scheduler) reportFatal(ch chan<- error, err error) {
	if err == nil {
		return
	}
	select {
	case ch <- err:
	default:
	}
}
// ---- helpers ----
// effectiveStreamExitPolicy normalizes an unset or unknown policy value to
// the restart default.
func effectiveStreamExitPolicy(policy StreamExitPolicy) StreamExitPolicy {
	if policy == StreamExitPolicyStop || policy == StreamExitPolicyFatal {
		return policy
	}
	return StreamExitPolicyRestart
}
// effectiveStreamBackoff fills zero/negative fields with package defaults,
// clamps Max so it is never below Initial, and floors Jitter at zero.
func effectiveStreamBackoff(cfg StreamBackoff) StreamBackoff {
	initial, maxDelay, jitter := cfg.Initial, cfg.Max, cfg.Jitter
	if initial <= 0 {
		initial = defaultStreamBackoffInitial
	}
	if maxDelay <= 0 {
		maxDelay = defaultStreamBackoffMax
	}
	if maxDelay < initial {
		maxDelay = initial
	}
	if jitter < 0 {
		jitter = 0
	}
	return StreamBackoff{Initial: initial, Max: maxDelay, Jitter: jitter}
}
// normalizeStreamExitError guarantees a non-nil error for every stream exit:
// a nil exit (a stream source returning without error) is wrapped as a
// retryable "exited unexpectedly" error so the supervision loop can treat it
// like any other retryable failure.
func normalizeStreamExitError(sourceName string, err error) error {
	if err != nil {
		return err
	}
	return sources.StreamRetryable(fmt.Errorf("stream source %q exited unexpectedly without error", sourceName))
}
// nextStreamBackoff doubles the current delay and clamps it at max, guarding
// against duration overflow. Zero or negative inputs fall back to the
// package defaults.
func nextStreamBackoff(current, max time.Duration) time.Duration {
	if current <= 0 {
		current = defaultStreamBackoffInitial
	}
	if max <= 0 {
		max = defaultStreamBackoffMax
	}
	if current >= max {
		return max
	}
	doubled := current * 2
	// Overflow wraps doubled below current; clamp that case too.
	if doubled < current || doubled > max {
		return max
	}
	return doubled
}
// streamRunWasStable reports whether a stream ran at least the reset window,
// meaning the next restart should begin from the initial backoff again.
// Zero timestamps are never considered stable.
func streamRunWasStable(startedAt, endedAt time.Time) bool {
	if startedAt.IsZero() || endedAt.IsZero() {
		return false
	}
	runFor := endedAt.Sub(startedAt)
	return runFor >= streamBackoffResetAfter
}
func seededRNG(name string) *rand.Rand {
seed := time.Now().UnixNano() ^ int64(hashStringFNV32a(name))
seed := timeNow().UnixNano() ^ int64(hashStringFNV32a(name))
return rand.New(rand.NewSource(seed))
}
@@ -206,11 +356,23 @@ func sleepJitter(ctx context.Context, rng *rand.Rand, max time.Duration) bool {
return true
}
return sleepDuration(ctx, randomDuration(rng, max))
}
func randomDuration(rng *rand.Rand, max time.Duration) time.Duration {
if max <= 0 {
return 0
}
// Int63n requires a positive argument.
// We add 1 so max itself is attainable.
n := rng.Int63n(int64(max) + 1)
d := time.Duration(n)
return time.Duration(n)
}
func sleepDuration(ctx context.Context, d time.Duration) bool {
if d <= 0 {
return true
}
timer := time.NewTimer(d)
defer timer.Stop()

472
scheduler/scheduler_test.go Normal file
View File

@@ -0,0 +1,472 @@
package scheduler
import (
"context"
"errors"
"fmt"
"strings"
"sync"
"testing"
"time"
"gitea.maximumdirect.net/ejr/feedkit/config"
"gitea.maximumdirect.net/ejr/feedkit/event"
"gitea.maximumdirect.net/ejr/feedkit/sources"
)
// testPollSource is a minimal PollSource stub whose Poll always succeeds with
// no events.
type testPollSource struct {
	name string
}

func (s testPollSource) Name() string                                { return s.name }
func (s testPollSource) Poll(context.Context) ([]event.Event, error) { return nil, nil }
// scriptedStreamSource plays back a scripted sequence of Run behaviors:
// call N executes runs[N] when present; once the script is exhausted, Run
// blocks on ctx like a healthy long-lived stream.
type scriptedStreamSource struct {
	name  string
	mu    sync.Mutex // guards calls; Run may be invoked from the scheduler goroutine while tests read CallCount
	calls int
	runs  []func(context.Context, chan<- event.Event) error
}

func (s *scriptedStreamSource) Name() string { return s.name }

// Run executes the next scripted behavior (or blocks until ctx cancel).
func (s *scriptedStreamSource) Run(ctx context.Context, out chan<- event.Event) error {
	s.mu.Lock()
	call := s.calls
	s.calls++
	var run func(context.Context, chan<- event.Event) error
	if call < len(s.runs) {
		run = s.runs[call]
	}
	s.mu.Unlock()
	if run != nil {
		return run(ctx, out)
	}
	<-ctx.Done()
	return ctx.Err()
}

// CallCount reports how many times Run has been invoked so far.
func (s *scriptedStreamSource) CallCount() int {
	s.mu.Lock()
	defer s.mu.Unlock()
	return s.calls
}
// capturingLogger records formatted log lines so tests can assert on
// scheduler log output.
type capturingLogger struct {
	mu    sync.Mutex
	lines []string
}

func (l *capturingLogger) Logf(format string, args ...any) {
	l.mu.Lock()
	defer l.mu.Unlock()
	l.lines = append(l.lines, fmt.Sprintf(format, args...))
}

// Contains reports whether any captured line contains substr.
func (l *capturingLogger) Contains(substr string) bool {
	l.mu.Lock()
	defer l.mu.Unlock()
	for _, line := range l.lines {
		if strings.Contains(line, substr) {
			return true
		}
	}
	return false
}
// TestSchedulerRunRestartsPlainStreamErrors verifies that plain (unclassified)
// stream errors are treated as retryable: the supervisor keeps restarting the
// source until the context is cancelled.
func TestSchedulerRunRestartsPlainStreamErrors(t *testing.T) {
	src := &scriptedStreamSource{
		name: "stream-a",
		runs: []func(context.Context, chan<- event.Event) error{
			func(context.Context, chan<- event.Event) error { return errors.New("temporary failure") },
			func(context.Context, chan<- event.Event) error { return errors.New("temporary failure") },
		},
	}
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	s := &Scheduler{
		Jobs: []Job{{
			Source: src,
			StreamBackoff: StreamBackoff{
				Initial: time.Millisecond,
				Max:     time.Millisecond,
			},
		}},
		Out: make(chan event.Event, 1),
	}
	errCh := make(chan error, 1)
	go func() { errCh <- s.Run(ctx) }()
	waitFor(t, func() bool { return src.CallCount() >= 3 })
	cancel()
	err := <-errCh
	if !errors.Is(err, context.Canceled) {
		t.Fatalf("Scheduler.Run() error = %v, want context canceled", err)
	}
	if src.CallCount() < 3 {
		t.Fatalf("stream call count = %d, want at least 3", src.CallCount())
	}
}
// TestSchedulerRunFatalStreamErrorReturns verifies that a StreamFatal-wrapped
// exit propagates out of Run, keeping both the fatal classification and the
// base error in the chain.
func TestSchedulerRunFatalStreamErrorReturns(t *testing.T) {
	base := errors.New("fatal failure")
	src := &scriptedStreamSource{
		name: "stream-fatal",
		runs: []func(context.Context, chan<- event.Event) error{
			func(context.Context, chan<- event.Event) error { return sources.StreamFatal(base) },
		},
	}
	s := &Scheduler{
		Jobs: []Job{{Source: src}},
		Out:  make(chan event.Event, 1),
	}
	err := s.Run(context.Background())
	if err == nil {
		t.Fatalf("Scheduler.Run() error = nil, want fatal error")
	}
	if !sources.IsStreamFatal(err) {
		t.Fatalf("Scheduler.Run() error = %v, want fatal classification", err)
	}
	if !errors.Is(err, base) {
		t.Fatalf("Scheduler.Run() error does not wrap base fatal error: %v", err)
	}
}
// TestSchedulerRunStopPolicyStopsOnlyThatSource verifies that the stop policy
// retires the failing source without making Run return: Run keeps blocking
// until the context is cancelled, and the source is not restarted.
func TestSchedulerRunStopPolicyStopsOnlyThatSource(t *testing.T) {
	src := &scriptedStreamSource{
		name: "stream-stop",
		runs: []func(context.Context, chan<- event.Event) error{
			func(context.Context, chan<- event.Event) error { return errors.New("stop now") },
		},
	}
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	s := &Scheduler{
		Jobs: []Job{{
			Source:           src,
			StreamExitPolicy: StreamExitPolicyStop,
		}},
		Out: make(chan event.Event, 1),
	}
	errCh := make(chan error, 1)
	go func() { errCh <- s.Run(ctx) }()
	waitFor(t, func() bool { return src.CallCount() >= 1 })
	// Give the scheduler a moment to (incorrectly) return or restart.
	time.Sleep(20 * time.Millisecond)
	select {
	case err := <-errCh:
		t.Fatalf("Scheduler.Run() returned early: %v", err)
	default:
	}
	if src.CallCount() != 1 {
		t.Fatalf("stream call count = %d, want 1", src.CallCount())
	}
	cancel()
	err := <-errCh
	if !errors.Is(err, context.Canceled) {
		t.Fatalf("Scheduler.Run() error = %v, want context canceled", err)
	}
}
// TestSchedulerRunFatalPolicyTreatsPlainErrorAsFatal verifies that under the
// fatal policy even an unclassified error makes Run return, wrapping the
// base error.
func TestSchedulerRunFatalPolicyTreatsPlainErrorAsFatal(t *testing.T) {
	base := errors.New("plain failure")
	src := &scriptedStreamSource{
		name: "stream-fatal-policy",
		runs: []func(context.Context, chan<- event.Event) error{
			func(context.Context, chan<- event.Event) error { return base },
		},
	}
	s := &Scheduler{
		Jobs: []Job{{
			Source:           src,
			StreamExitPolicy: StreamExitPolicyFatal,
		}},
		Out: make(chan event.Event, 1),
	}
	err := s.Run(context.Background())
	if err == nil {
		t.Fatalf("Scheduler.Run() error = nil, want fatal-policy error")
	}
	if !errors.Is(err, base) {
		t.Fatalf("Scheduler.Run() error does not wrap base error: %v", err)
	}
}
// TestSchedulerRunNilExitRestartsAsUnexpected verifies that a stream source
// returning nil is normalized to a retryable "exited unexpectedly" error:
// the source restarts and the log mentions the nil exit.
func TestSchedulerRunNilExitRestartsAsUnexpected(t *testing.T) {
	logger := &capturingLogger{}
	src := &scriptedStreamSource{
		name: "stream-nil-exit",
		runs: []func(context.Context, chan<- event.Event) error{
			func(context.Context, chan<- event.Event) error { return nil },
		},
	}
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	s := &Scheduler{
		Jobs: []Job{{
			Source: src,
			StreamBackoff: StreamBackoff{
				Initial: time.Millisecond,
				Max:     time.Millisecond,
			},
		}},
		Out:  make(chan event.Event, 1),
		Logf: logger.Logf,
	}
	errCh := make(chan error, 1)
	go func() { errCh <- s.Run(ctx) }()
	waitFor(t, func() bool { return src.CallCount() >= 2 })
	cancel()
	err := <-errCh
	if !errors.Is(err, context.Canceled) {
		t.Fatalf("Scheduler.Run() error = %v, want context canceled", err)
	}
	if !logger.Contains("exited unexpectedly without error") {
		t.Fatalf("expected log to mention unexpected nil stream exit")
	}
}
// TestSchedulerRunContextCancelDuringBackoff verifies that cancelling the
// context while a restart delay is pending aborts the backoff sleep: Run
// returns and the source is not restarted again.
func TestSchedulerRunContextCancelDuringBackoff(t *testing.T) {
	src := &scriptedStreamSource{
		name: "stream-backoff-cancel",
		runs: []func(context.Context, chan<- event.Event) error{
			func(context.Context, chan<- event.Event) error { return errors.New("retry me") },
		},
	}
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	s := &Scheduler{
		Jobs: []Job{{
			Source: src,
			StreamBackoff: StreamBackoff{
				Initial: time.Second,
				Max:     time.Second,
			},
		}},
		Out: make(chan event.Event, 1),
	}
	errCh := make(chan error, 1)
	go func() { errCh <- s.Run(ctx) }()
	waitFor(t, func() bool { return src.CallCount() >= 1 })
	cancel()
	err := <-errCh
	if !errors.Is(err, context.Canceled) {
		t.Fatalf("Scheduler.Run() error = %v, want context canceled", err)
	}
	time.Sleep(20 * time.Millisecond)
	if src.CallCount() != 1 {
		t.Fatalf("stream call count = %d, want 1", src.CallCount())
	}
}
// TestNextStreamBackoffCapsAtMax checks doubling below the cap, clamping at
// the cap, and staying at the cap once reached.
func TestNextStreamBackoffCapsAtMax(t *testing.T) {
	if got := nextStreamBackoff(500*time.Millisecond, 2*time.Second); got != time.Second {
		t.Fatalf("nextStreamBackoff() = %s, want 1s", got)
	}
	if got := nextStreamBackoff(time.Second, 2*time.Second); got != 2*time.Second {
		t.Fatalf("nextStreamBackoff() = %s, want 2s", got)
	}
	if got := nextStreamBackoff(2*time.Second, 2*time.Second); got != 2*time.Second {
		t.Fatalf("nextStreamBackoff() = %s, want 2s", got)
	}
}
// TestStreamRunWasStableAfterFiveMinutes checks the stability threshold
// boundary: just under five minutes is unstable, exactly five minutes is
// stable.
func TestStreamRunWasStableAfterFiveMinutes(t *testing.T) {
	start := time.Date(2026, 3, 29, 12, 0, 0, 0, time.UTC)
	if streamRunWasStable(start, start.Add(4*time.Minute+59*time.Second)) {
		t.Fatalf("streamRunWasStable() = true, want false")
	}
	if !streamRunWasStable(start, start.Add(5*time.Minute)) {
		t.Fatalf("streamRunWasStable() = false, want true")
	}
}
// TestJobFromSourceConfigPollSource checks the happy path for a poll source:
// the configured cadence is carried into the Job.
func TestJobFromSourceConfigPollSource(t *testing.T) {
	job, err := JobFromSourceConfig(testPollSource{name: "poll-a"}, config.SourceConfig{
		Name:   "poll-a",
		Driver: "poll_driver",
		Every:  config.Duration{Duration: time.Minute},
	})
	if err != nil {
		t.Fatalf("JobFromSourceConfig() error = %v", err)
	}
	if job.Every != time.Minute {
		t.Fatalf("Job.Every = %s, want 1m", job.Every)
	}
}
// TestJobFromSourceConfigPollSourceRejectsStreamParams checks that a poll
// source carrying a stream-only param is rejected.
func TestJobFromSourceConfigPollSourceRejectsStreamParams(t *testing.T) {
	_, err := JobFromSourceConfig(testPollSource{name: "poll-a"}, config.SourceConfig{
		Name:   "poll-a",
		Driver: "poll_driver",
		Every:  config.Duration{Duration: time.Minute},
		Params: map[string]any{
			"stream_exit_policy": "restart",
		},
	})
	if err == nil {
		t.Fatalf("JobFromSourceConfig() error = nil, want rejection")
	}
	if !strings.Contains(err.Error(), "only valid for stream sources") {
		t.Fatalf("JobFromSourceConfig() error = %q", err)
	}
}
// TestJobFromSourceConfigStreamSourceParsesDefaultsAndOverrides checks that
// explicit stream_* params are honored, and that omitting them yields the
// package defaults (restart policy, default backoff triple).
func TestJobFromSourceConfigStreamSourceParsesDefaultsAndOverrides(t *testing.T) {
	src := &scriptedStreamSource{name: "stream-a"}
	job, err := JobFromSourceConfig(src, config.SourceConfig{
		Name:   "stream-a",
		Driver: "stream_driver",
		Mode:   config.SourceModeStream,
		Params: map[string]any{
			"stream_exit_policy":     "stop",
			"stream_backoff_initial": "2s",
			"stream_backoff_max":     "10s",
			"stream_backoff_jitter":  "500ms",
		},
	})
	if err != nil {
		t.Fatalf("JobFromSourceConfig() error = %v", err)
	}
	if job.StreamExitPolicy != StreamExitPolicyStop {
		t.Fatalf("Job.StreamExitPolicy = %q, want %q", job.StreamExitPolicy, StreamExitPolicyStop)
	}
	if job.StreamBackoff.Initial != 2*time.Second {
		t.Fatalf("Job.StreamBackoff.Initial = %s, want 2s", job.StreamBackoff.Initial)
	}
	if job.StreamBackoff.Max != 10*time.Second {
		t.Fatalf("Job.StreamBackoff.Max = %s, want 10s", job.StreamBackoff.Max)
	}
	if job.StreamBackoff.Jitter != 500*time.Millisecond {
		t.Fatalf("Job.StreamBackoff.Jitter = %s, want 500ms", job.StreamBackoff.Jitter)
	}
	defaultJob, err := JobFromSourceConfig(src, config.SourceConfig{
		Name:   "stream-default",
		Driver: "stream_driver",
		Mode:   config.SourceModeStream,
	})
	if err != nil {
		t.Fatalf("JobFromSourceConfig() default error = %v", err)
	}
	if defaultJob.StreamExitPolicy != StreamExitPolicyRestart {
		t.Fatalf("default Job.StreamExitPolicy = %q, want restart", defaultJob.StreamExitPolicy)
	}
	if defaultJob.StreamBackoff.Initial != defaultStreamBackoffInitial {
		t.Fatalf("default Job.StreamBackoff.Initial = %s, want %s", defaultJob.StreamBackoff.Initial, defaultStreamBackoffInitial)
	}
	if defaultJob.StreamBackoff.Max != defaultStreamBackoffMax {
		t.Fatalf("default Job.StreamBackoff.Max = %s, want %s", defaultJob.StreamBackoff.Max, defaultStreamBackoffMax)
	}
	if defaultJob.StreamBackoff.Jitter != defaultStreamBackoffJitter {
		t.Fatalf("default Job.StreamBackoff.Jitter = %s, want %s", defaultJob.StreamBackoff.Jitter, defaultStreamBackoffJitter)
	}
}
// TestJobFromSourceConfigStreamSourceRejectsInvalidSettings checks the three
// validation failures: an unknown exit policy, a non-positive initial
// backoff, and a max backoff below the initial.
func TestJobFromSourceConfigStreamSourceRejectsInvalidSettings(t *testing.T) {
	src := &scriptedStreamSource{name: "stream-b"}
	_, err := JobFromSourceConfig(src, config.SourceConfig{
		Name:   "stream-b",
		Driver: "stream_driver",
		Mode:   config.SourceModeStream,
		Params: map[string]any{
			"stream_exit_policy": "sometimes",
		},
	})
	if err == nil {
		t.Fatalf("JobFromSourceConfig() error = nil, want invalid policy error")
	}
	if !strings.Contains(err.Error(), "stream_exit_policy") {
		t.Fatalf("JobFromSourceConfig() error = %q", err)
	}
	_, err = JobFromSourceConfig(src, config.SourceConfig{
		Name:   "stream-b",
		Driver: "stream_driver",
		Mode:   config.SourceModeStream,
		Params: map[string]any{
			"stream_backoff_initial": "0s",
		},
	})
	if err == nil {
		t.Fatalf("JobFromSourceConfig() error = nil, want invalid initial backoff error")
	}
	if !strings.Contains(err.Error(), "stream_backoff_initial") {
		t.Fatalf("JobFromSourceConfig() error = %q", err)
	}
	_, err = JobFromSourceConfig(src, config.SourceConfig{
		Name:   "stream-b",
		Driver: "stream_driver",
		Mode:   config.SourceModeStream,
		Params: map[string]any{
			"stream_backoff_initial": "2s",
			"stream_backoff_max":     "1s",
		},
	})
	if err == nil {
		t.Fatalf("JobFromSourceConfig() error = nil, want invalid max backoff error")
	}
	if !strings.Contains(err.Error(), "stream_backoff_max") {
		t.Fatalf("JobFromSourceConfig() error = %q", err)
	}
}
// TestJobFromSourceConfigStreamSourceRejectsEvery checks that a stream source
// configured with sources[].every is rejected.
func TestJobFromSourceConfigStreamSourceRejectsEvery(t *testing.T) {
	src := &scriptedStreamSource{name: "stream-c"}
	_, err := JobFromSourceConfig(src, config.SourceConfig{
		Name:   "stream-c",
		Driver: "stream_driver",
		Mode:   config.SourceModeStream,
		Every:  config.Duration{Duration: time.Minute},
	})
	if err == nil {
		t.Fatalf("JobFromSourceConfig() error = nil, want every rejection")
	}
	if !strings.Contains(err.Error(), "sources[].every must be omitted") {
		t.Fatalf("JobFromSourceConfig() error = %q", err)
	}
}
// waitFor polls cond every 10ms and fails the test if it is not satisfied
// within two seconds.
func waitFor(t *testing.T, cond func() bool) {
	t.Helper()
	const (
		timeout  = 2 * time.Second
		interval = 10 * time.Millisecond
	)
	for start := time.Now(); time.Since(start) < timeout; time.Sleep(interval) {
		if cond() {
			return
		}
	}
	t.Fatalf("condition not satisfied before timeout")
}

View File

@@ -4,18 +4,15 @@ import (
"context"
"database/sql"
"fmt"
"net/url"
"strconv"
"strings"
"time"
"gitea.maximumdirect.net/ejr/feedkit/config"
"gitea.maximumdirect.net/ejr/feedkit/event"
_ "github.com/lib/pq"
pgconn "gitea.maximumdirect.net/ejr/feedkit/internal/postgres"
)
const postgresInitTimeout = 5 * time.Second
type postgresTx interface {
ExecContext(ctx context.Context, query string, args ...any) (sql.Result, error)
Commit() error
@@ -73,8 +70,8 @@ func (w *sqlTxWrapper) Rollback() error {
return w.tx.Rollback()
}
var openPostgresDB = func(dsn string) (postgresDB, error) {
db, err := sql.Open("postgres", dsn)
var openPostgresDB = func(ctx context.Context, cfg pgconn.ConnConfig) (postgresDB, error) {
db, err := pgconn.Open(ctx, cfg)
if err != nil {
return nil, err
}
@@ -111,12 +108,11 @@ func NewPostgresSinkFromConfig(cfg config.SinkConfig, schemaDef PostgresSchema)
return nil, fmt.Errorf("postgres sink %q: compile schema: %w", cfg.Name, err)
}
dsn, err := buildPostgresDSN(uri, username, password)
if err != nil {
return nil, fmt.Errorf("postgres sink %q: build dsn: %w", cfg.Name, err)
}
db, err := openPostgresDB(dsn)
db, err := openPostgresDB(context.Background(), pgconn.ConnConfig{
URI: uri,
Username: username,
Password: password,
})
if err != nil {
return nil, fmt.Errorf("postgres sink %q: open db: %w", cfg.Name, err)
}
@@ -264,13 +260,9 @@ func (p *PostgresSink) PruneAllOlderThan(ctx context.Context, cutoff time.Time)
}
func (p *PostgresSink) initialize() error {
ctx, cancel := context.WithTimeout(context.Background(), postgresInitTimeout)
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
defer cancel()
if err := p.db.PingContext(ctx); err != nil {
return fmt.Errorf("postgres sink %q: ping db: %w", p.name, err)
}
for _, tableName := range p.schema.tableOrder {
tbl := p.schema.tables[tableName]
@@ -302,21 +294,6 @@ func (p *PostgresSink) lookupTable(table string) (postgresTableCompiled, error)
return tbl, nil
}
func buildPostgresDSN(uri, username, password string) (string, error) {
u, err := url.Parse(strings.TrimSpace(uri))
if err != nil {
return "", fmt.Errorf("invalid uri: %w", err)
}
if u.Scheme == "" {
return "", fmt.Errorf("invalid uri: missing scheme")
}
if u.Host == "" {
return "", fmt.Errorf("invalid uri: missing host")
}
u.User = url.UserPassword(username, password)
return u.String(), nil
}
func parsePostgresPruneWindow(cfg config.SinkConfig) (time.Duration, error) {
raw, ok := cfg.Params["prune"]
if !ok || raw == nil {

View File

@@ -4,13 +4,13 @@ import (
"context"
"database/sql"
"errors"
"net/url"
"strings"
"testing"
"time"
"gitea.maximumdirect.net/ejr/feedkit/config"
"gitea.maximumdirect.net/ejr/feedkit/event"
pgconn "gitea.maximumdirect.net/ejr/feedkit/internal/postgres"
)
type fakeResult struct {
@@ -223,10 +223,13 @@ func TestPostgresFactoryBuildsMultipleSinksWithSameSchema(t *testing.T) {
withPostgresTestState(t)
dbs := []*fakeDB{{}, {}}
var gotDSNs []string
openPostgresDB = func(dsn string) (postgresDB, error) {
gotDSNs = append(gotDSNs, dsn)
db := dbs[len(gotDSNs)-1]
var gotCfgs []pgconn.ConnConfig
openPostgresDB = func(ctx context.Context, cfg pgconn.ConnConfig) (postgresDB, error) {
gotCfgs = append(gotCfgs, cfg)
db := dbs[len(gotCfgs)-1]
if err := db.PingContext(ctx); err != nil {
return nil, err
}
return db, nil
}
@@ -252,8 +255,11 @@ func TestPostgresFactoryBuildsMultipleSinksWithSameSchema(t *testing.T) {
}
}
if len(gotDSNs) != 2 {
t.Fatalf("len(gotDSNs) = %d, want 2", len(gotDSNs))
if len(gotCfgs) != 2 {
t.Fatalf("len(gotCfgs) = %d, want 2", len(gotCfgs))
}
if gotCfgs[0].Username != "user" || gotCfgs[0].Password != "pass" {
t.Fatalf("first ConnConfig = %+v", gotCfgs[0])
}
for i, db := range dbs {
if db.pingCalls != 1 {
@@ -327,9 +333,12 @@ func TestNewPostgresSinkFromConfigEagerInit(t *testing.T) {
withPostgresTestState(t)
db := &fakeDB{}
var gotDSN string
openPostgresDB = func(dsn string) (postgresDB, error) {
gotDSN = dsn
var gotCfg pgconn.ConnConfig
openPostgresDB = func(ctx context.Context, cfg pgconn.ConnConfig) (postgresDB, error) {
gotCfg = cfg
if err := db.PingContext(ctx); err != nil {
return nil, err
}
return db, nil
}
@@ -362,16 +371,14 @@ func TestNewPostgresSinkFromConfigEagerInit(t *testing.T) {
t.Fatalf("unexpected create index query: %s", db.execCalls[1].query)
}
u, err := url.Parse(gotDSN)
if err != nil {
t.Fatalf("parse dsn: %v", err)
if gotCfg.URI != "postgres://db.example.local:5432/feedkit?sslmode=disable" {
t.Fatalf("URI = %q", gotCfg.URI)
}
if u.User == nil || u.User.Username() != "app_user" {
t.Fatalf("dsn missing username: %q", gotDSN)
if gotCfg.Username != "app_user" {
t.Fatalf("Username = %q, want app_user", gotCfg.Username)
}
pass, ok := u.User.Password()
if !ok || pass != "app_pass" {
t.Fatalf("dsn missing password: %q", gotDSN)
if gotCfg.Password != "app_pass" {
t.Fatalf("Password = %q, want app_pass", gotCfg.Password)
}
}
@@ -379,7 +386,7 @@ func TestNewPostgresSinkFromConfigInitFailureClosesDB(t *testing.T) {
withPostgresTestState(t)
db := &fakeDB{execErrOnCall: 1, execErr: errors.New("ddl failed")}
openPostgresDB = func(_ string) (postgresDB, error) {
openPostgresDB = func(_ context.Context, _ pgconn.ConnConfig) (postgresDB, error) {
return db, nil
}
@@ -415,7 +422,7 @@ func TestNewPostgresSinkFromConfigPruneParamAccepted(t *testing.T) {
t.Run(tc.name, func(t *testing.T) {
withPostgresTestState(t)
openPostgresDB = func(_ string) (postgresDB, error) {
openPostgresDB = func(_ context.Context, _ pgconn.ConnConfig) (postgresDB, error) {
return &fakeDB{}, nil
}

View File

@@ -5,8 +5,12 @@
// - Input: common source identity surface
// - PollSource: polling source interface
// - StreamSource: streaming source interface
// - StreamRetryable / StreamFatal / IsStreamRetryable / IsStreamFatal:
// stream exit classification helpers
// - Registry / NewRegistry: source driver registry and builders
// - HTTPSource / NewHTTPSource: reusable HTTP polling helper
// - PostgresQuerySource / NewPostgresQuerySource: reusable Postgres polling
// helper
//
// Source drivers are domain-specific and registered into Registry by driver name.
// Registry can then build configured sources from config.SourceConfig.
@@ -32,4 +36,17 @@
// When validators are available, NewHTTPSource prefers ETag/If-None-Match and
// falls back to Last-Modified/If-Modified-Since. A 304 Not Modified response is
// treated as a successful unchanged poll.
//
// Postgres-backed polling sources can share NewPostgresQuerySource for generic
// DB config parsing and query execution. The helper understands:
// - params.uri
// - params.username
// - params.password
// - params.query
// - params.query_timeout (optional, default 30s)
//
// feedkit does not register a built-in postgres poll driver. Downstream daemons
// should register domain-specific driver names that call
// NewPostgresQuerySource, then keep SQL semantics, row scanning, ordering,
// watermark policy, and event construction in their own source types.
package sources

117
sources/postgres.go Normal file
View File

@@ -0,0 +1,117 @@
package sources
import (
"context"
"database/sql"
"fmt"
"strings"
"time"
"gitea.maximumdirect.net/ejr/feedkit/config"
pgconn "gitea.maximumdirect.net/ejr/feedkit/internal/postgres"
)
// defaultPostgresQueryTimeout bounds query execution when the source config
// does not provide params.query_timeout.
const defaultPostgresQueryTimeout = 30 * time.Second
// postgresQueryDB is the minimal query surface PostgresQuerySource needs.
// It is an interface so tests can substitute a fake database handle.
type postgresQueryDB interface {
	QueryContext(ctx context.Context, query string, args ...any) (*sql.Rows, error)
}
// openPostgresQueryDB opens a connection via the shared internal/postgres
// helper. It is a package variable so tests can stub out real connections.
var openPostgresQueryDB = func(ctx context.Context, cfg pgconn.ConnConfig) (postgresQueryDB, error) {
	return pgconn.Open(ctx, cfg)
}
// PostgresQuerySource is a reusable helper for polling Postgres-backed sources.
//
// It centralizes generic source config parsing and query execution. Concrete
// daemon sources remain responsible for SQL semantics, row scanning, cursoring,
// and event construction.
type PostgresQuerySource struct {
	// Driver is the registry driver name; it prefixes all error messages.
	Driver string
	// Name is the configured source name (trimmed config.SourceConfig.Name).
	Name string
	// SQL is the query text taken from params.query.
	SQL string
	// QueryTimeout bounds each Query call (params.query_timeout, default 30s).
	QueryTimeout time.Duration
	// db is the opened database handle (replaced by a fake in tests).
	db postgresQueryDB
}
// NewPostgresQuerySource builds a generic Postgres polling helper from
// SourceConfig.
//
// Required params:
//   - params.uri
//   - params.username
//   - params.password
//   - params.query
//
// Optional params:
//   - params.query_timeout (default 30s)
func NewPostgresQuerySource(driver string, cfg config.SourceConfig) (*PostgresQuerySource, error) {
	name := strings.TrimSpace(cfg.Name)
	if name == "" {
		return nil, fmt.Errorf("%s: name is required", driver)
	}
	if cfg.Params == nil {
		return nil, fmt.Errorf("%s %q: params are required (need params.uri, params.username, params.password, and params.query)", driver, cfg.Name)
	}
	// requireParam fetches a mandatory string param, producing the shared
	// `<driver> <name>: params.<key> is required` error shape when absent.
	requireParam := func(key string) (string, error) {
		v, ok := cfg.ParamString(key)
		if !ok {
			return "", fmt.Errorf("%s %q: params.%s is required", driver, cfg.Name, key)
		}
		return v, nil
	}
	uri, err := requireParam("uri")
	if err != nil {
		return nil, err
	}
	username, err := requireParam("username")
	if err != nil {
		return nil, err
	}
	password, err := requireParam("password")
	if err != nil {
		return nil, err
	}
	query, err := requireParam("query")
	if err != nil {
		return nil, err
	}
	queryTimeout := defaultPostgresQueryTimeout
	if _, exists := cfg.Params["query_timeout"]; exists {
		parsed, ok := cfg.ParamDuration("query_timeout")
		if !ok || parsed <= 0 {
			// Fix: use the driver-prefixed error shape for consistency with
			// every other validation error here (previously "source %q: ...").
			return nil, fmt.Errorf("%s %q: params.query_timeout must be a positive duration", driver, cfg.Name)
		}
		queryTimeout = parsed
	}
	db, err := openPostgresQueryDB(context.Background(), pgconn.ConnConfig{
		URI:      uri,
		Username: username,
		Password: password,
	})
	if err != nil {
		return nil, fmt.Errorf("%s %q: open db: %w", driver, cfg.Name, err)
	}
	return &PostgresQuerySource{
		Driver:       driver,
		Name:         name,
		SQL:          query,
		QueryTimeout: queryTimeout,
		db:           db,
	}, nil
}
// Query executes the configured SQL against the source's database handle.
//
// When QueryTimeout is positive and the caller's context either has no
// deadline or a later one, a derived timeout context bounds the query.
// Errors are wrapped with the driver and source name for context.
func (s *PostgresQuerySource) Query(ctx context.Context, args ...any) (*sql.Rows, error) {
	runCtx := ctx
	if s.QueryTimeout > 0 {
		deadline, hasDeadline := ctx.Deadline()
		if !hasDeadline || time.Until(deadline) > s.QueryTimeout {
			// The cancel func is deliberately dropped: the returned rows may
			// still be streaming from the database after this call returns.
			runCtx, _ = context.WithTimeout(ctx, s.QueryTimeout)
		}
	}
	rows, err := s.db.QueryContext(runCtx, s.SQL, args...)
	if err != nil {
		return nil, fmt.Errorf("%s %q: query: %w", s.Driver, s.Name, err)
	}
	return rows, nil
}

352
sources/postgres_test.go Normal file
View File

@@ -0,0 +1,352 @@
package sources
import (
"context"
"database/sql"
"database/sql/driver"
"errors"
"io"
"strings"
"sync"
"testing"
"time"
"gitea.maximumdirect.net/ejr/feedkit/config"
"gitea.maximumdirect.net/ejr/feedkit/event"
pgconn "gitea.maximumdirect.net/ejr/feedkit/internal/postgres"
)
// fakePostgresQueryDB is a test double for postgresQueryDB. It records the
// most recent QueryContext call and returns canned rows or a canned error.
type fakePostgresQueryDB struct {
	queryErr error
	lastCtx context.Context
	lastQuery string
	lastArgs []any
	returnRows *sql.Rows
}
// QueryContext records the call's context, query text, and a copy of args,
// then returns the configured error or rows.
func (db *fakePostgresQueryDB) QueryContext(ctx context.Context, query string, args ...any) (*sql.Rows, error) {
	db.lastCtx = ctx
	db.lastQuery = query
	// Copy args so the stored slice is independent of the caller's backing array.
	db.lastArgs = append([]any(nil), args...)
	if db.queryErr != nil {
		return nil, db.queryErr
	}
	return db.returnRows, nil
}
// withPostgresQuerySourceTestState snapshots the package-level open seam and
// restores it automatically when the test (and its subtests) finish.
func withPostgresQuerySourceTestState(t *testing.T) {
	t.Helper()
	prev := openPostgresQueryDB
	t.Cleanup(func() { openPostgresQueryDB = prev })
}
// TestNewPostgresQuerySourceMissingParams verifies each required param
// produces a descriptive constructor error when absent.
func TestNewPostgresQuerySourceMissingParams(t *testing.T) {
	withPostgresQuerySourceTestState(t)
	cases := []struct {
		name    string
		params  map[string]any
		wantSub string
	}{
		{name: "missing uri", params: map[string]any{"username": "u", "password": "p", "query": "SELECT 1"}, wantSub: "params.uri"},
		{name: "missing username", params: map[string]any{"uri": "postgres://localhost/db", "password": "p", "query": "SELECT 1"}, wantSub: "params.username"},
		{name: "missing password", params: map[string]any{"uri": "postgres://localhost/db", "username": "u", "query": "SELECT 1"}, wantSub: "params.password"},
		{name: "missing query", params: map[string]any{"uri": "postgres://localhost/db", "username": "u", "password": "p"}, wantSub: "params.query"},
	}
	for _, c := range cases {
		t.Run(c.name, func(t *testing.T) {
			cfg := config.SourceConfig{Name: "pg-source", Driver: "test_driver", Params: c.params}
			_, err := NewPostgresQuerySource("test_driver", cfg)
			switch {
			case err == nil:
				t.Fatalf("NewPostgresQuerySource() error = nil, want error")
			case !strings.Contains(err.Error(), c.wantSub):
				t.Fatalf("NewPostgresQuerySource() error = %q, want substring %q", err, c.wantSub)
			}
		})
	}
}
func TestNewPostgresQuerySourceRejectsInvalidQueryTimeout(t *testing.T) {
_, err := NewPostgresQuerySource("test_driver", config.SourceConfig{
Name: "pg-source",
Driver: "test_driver",
Params: map[string]any{
"uri": "postgres://localhost/db",
"username": "u",
"password": "p",
"query": "SELECT 1",
"query_timeout": "soon",
},
})
if err == nil {
t.Fatalf("NewPostgresQuerySource() error = nil, want error")
}
if !strings.Contains(err.Error(), "params.query_timeout must be a positive duration") {
t.Fatalf("NewPostgresQuerySource() error = %q", err)
}
}
// TestNewPostgresQuerySourceSuccessfulConstruction checks that a fully valid
// config yields a populated source and that credentials reach the open seam.
func TestNewPostgresQuerySourceSuccessfulConstruction(t *testing.T) {
	withPostgresQuerySourceTestState(t)
	fake := &fakePostgresQueryDB{}
	var seen pgconn.ConnConfig
	openPostgresQueryDB = func(_ context.Context, c pgconn.ConnConfig) (postgresQueryDB, error) {
		seen = c
		return fake, nil
	}
	src, err := NewPostgresQuerySource("test_driver", config.SourceConfig{
		Name:   "pg-source",
		Driver: "test_driver",
		Params: map[string]any{
			"uri":           "postgres://db.example.local/feedkit",
			"username":      "app_user",
			"password":      "app_pass",
			"query":         "SELECT * FROM observations",
			"query_timeout": "45s",
		},
	})
	if err != nil {
		t.Fatalf("NewPostgresQuerySource() error = %v", err)
	}
	switch {
	case src.Name != "pg-source":
		t.Fatalf("Name = %q, want pg-source", src.Name)
	case src.QueryTimeout != 45*time.Second:
		t.Fatalf("QueryTimeout = %s, want 45s", src.QueryTimeout)
	case src.SQL != "SELECT * FROM observations":
		t.Fatalf("SQL = %q", src.SQL)
	case seen.Username != "app_user" || seen.Password != "app_pass":
		t.Fatalf("ConnConfig = %+v", seen)
	}
}
// TestNewPostgresQuerySourceOpenFailure verifies open errors surface wrapped
// with the driver and source name.
func TestNewPostgresQuerySourceOpenFailure(t *testing.T) {
	withPostgresQuerySourceTestState(t)
	openPostgresQueryDB = func(context.Context, pgconn.ConnConfig) (postgresQueryDB, error) {
		return nil, errors.New("db unavailable")
	}
	_, err := NewPostgresQuerySource("test_driver", config.SourceConfig{
		Name:   "pg-source",
		Driver: "test_driver",
		Params: map[string]any{
			"uri":      "postgres://localhost/db",
			"username": "u",
			"password": "p",
			"query":    "SELECT 1",
		},
	})
	if err == nil {
		t.Fatalf("NewPostgresQuerySource() error = nil, want error")
	}
	if want := `test_driver "pg-source": open db: db unavailable`; !strings.Contains(err.Error(), want) {
		t.Fatalf("NewPostgresQuerySource() error = %q", err)
	}
}
// TestPostgresQuerySourceQueryAppliesTimeoutAndWrapsError confirms Query
// derives a deadline when the caller has none, forwards the SQL and args,
// and wraps database errors with driver/source context.
func TestPostgresQuerySourceQueryAppliesTimeoutAndWrapsError(t *testing.T) {
	fake := &fakePostgresQueryDB{queryErr: errors.New("query failed")}
	src := &PostgresQuerySource{
		Driver:       "test_driver",
		Name:         "pg-source",
		SQL:          "SELECT 1",
		QueryTimeout: 30 * time.Second,
		db:           fake,
	}
	_, err := src.Query(context.Background(), "arg1")
	if err == nil {
		t.Fatalf("Query() error = nil, want error")
	}
	if want := `test_driver "pg-source": query: query failed`; !strings.Contains(err.Error(), want) {
		t.Fatalf("Query() error = %q", err)
	}
	if fake.lastCtx == nil {
		t.Fatalf("lastCtx = nil")
	}
	if _, hasDeadline := fake.lastCtx.Deadline(); !hasDeadline {
		t.Fatalf("expected derived deadline on query context")
	}
	if fake.lastQuery != "SELECT 1" {
		t.Fatalf("lastQuery = %q", fake.lastQuery)
	}
	if len(fake.lastArgs) != 1 || fake.lastArgs[0] != "arg1" {
		t.Fatalf("lastArgs = %#v", fake.lastArgs)
	}
}
// TestPostgresQuerySourceQueryUsesEarlierCallerDeadline confirms Query passes
// the caller's context through unchanged when its deadline is already tighter
// than QueryTimeout.
func TestPostgresQuerySourceQueryUsesEarlierCallerDeadline(t *testing.T) {
	fake := &fakePostgresQueryDB{queryErr: errors.New("query failed")}
	src := &PostgresQuerySource{
		Driver:       "test_driver",
		Name:         "pg-source",
		SQL:          "SELECT 1",
		QueryTimeout: 30 * time.Second,
		db:           fake,
	}
	callerCtx, cancel := context.WithTimeout(context.Background(), 10*time.Millisecond)
	defer cancel()
	_, _ = src.Query(callerCtx)
	if fake.lastCtx != callerCtx {
		t.Fatalf("expected source to reuse earlier caller deadline")
	}
}
// TestPostgresQuerySourceSupportsDownstreamPollingPattern exercises the
// intended integration shape: a downstream source embeds PostgresQuerySource,
// calls Query, scans rows, and builds events from them.
func TestPostgresQuerySourceSupportsDownstreamPollingPattern(t *testing.T) {
	withPostgresQuerySourceTestState(t)
	// Real *sql.Rows backed by an in-memory driver serving one row ("evt-1").
	db, cleanup := openRowsTestDB(t, "feedkit_sources_pg_rows", []string{"event_id"}, [][]driver.Value{{"evt-1"}})
	defer cleanup()
	openPostgresQueryDB = func(_ context.Context, _ pgconn.ConnConfig) (postgresQueryDB, error) {
		return db, nil
	}
	type fakeDownstreamSource struct {
		pg *PostgresQuerySource
	}
	// poll mimics a downstream PollSource implementation: query, scan each row
	// into an event, then surface any iteration error via rows.Err().
	poll := func(s fakeDownstreamSource, ctx context.Context) ([]event.Event, error) {
		rows, err := s.pg.Query(ctx)
		if err != nil {
			return nil, err
		}
		defer rows.Close()
		var out []event.Event
		for rows.Next() {
			var eventID string
			if err := rows.Scan(&eventID); err != nil {
				return nil, err
			}
			out = append(out, event.Event{
				ID:        eventID,
				Kind:      event.Kind("observation"),
				Source:    s.pg.Name,
				Schema:    "raw.test.v1",
				EmittedAt: time.Now().UTC(),
				Payload:   map[string]any{"event_id": eventID},
			})
		}
		// rows.Err() reports errors encountered during iteration itself.
		if err := rows.Err(); err != nil {
			return nil, err
		}
		return out, nil
	}
	pg, err := NewPostgresQuerySource("test_driver", config.SourceConfig{
		Name:   "pg-source",
		Driver: "test_driver",
		Params: map[string]any{
			"uri":      "postgres://localhost/db",
			"username": "u",
			"password": "p",
			"query":    "SELECT event_id FROM events",
		},
	})
	if err != nil {
		t.Fatalf("NewPostgresQuerySource() error = %v", err)
	}
	events, err := poll(fakeDownstreamSource{pg: pg}, context.Background())
	if err != nil {
		t.Fatalf("poll() error = %v", err)
	}
	if len(events) != 1 {
		t.Fatalf("len(events) = %d, want 1", len(events))
	}
	if events[0].ID != "evt-1" {
		t.Fatalf("events[0].ID = %q, want evt-1", events[0].ID)
	}
}
var (
	// rowsDriverMu guards rowsDriverSeen: sql.Register panics on duplicate
	// driver names, so each test driver name may be registered only once.
	rowsDriverMu sync.Mutex
	rowsDriverSeen = map[string]bool{}
)
// openRowsTestDB registers (once per driver name) an in-memory driver serving
// the given columns/rows and returns an open handle plus a close func.
func openRowsTestDB(t *testing.T, driverName string, columns []string, rows [][]driver.Value) (*sql.DB, func()) {
	t.Helper()
	rowsDriverMu.Lock()
	if !rowsDriverSeen[driverName] {
		// Clone the fixture data so the registered driver owns its copies even
		// if the caller mutates the originals afterwards.
		sql.Register(driverName, &rowsTestDriver{columns: append([]string(nil), columns...), rows: cloneDriverRows(rows)})
		rowsDriverSeen[driverName] = true
	}
	rowsDriverMu.Unlock()
	db, err := sql.Open(driverName, "")
	if err != nil {
		t.Fatalf("sql.Open() error = %v", err)
	}
	return db, func() {
		_ = db.Close()
	}
}
func cloneDriverRows(in [][]driver.Value) [][]driver.Value {
out := make([][]driver.Value, 0, len(in))
for _, row := range in {
copied := append([]driver.Value(nil), row...)
out = append(out, copied)
}
return out
}
// rowsTestDriver is a minimal database/sql/driver.Driver that serves a fixed
// result set for every connection.
type rowsTestDriver struct {
	columns []string
	rows [][]driver.Value
}
// Open hands each connection its own copy of the fixture data so connections
// cannot interfere with one another's cursors.
func (d *rowsTestDriver) Open(string) (driver.Conn, error) {
	return &rowsTestConn{columns: append([]string(nil), d.columns...), rows: cloneDriverRows(d.rows)}, nil
}
// rowsTestConn is a stub driver.Conn that only supports QueryContext.
type rowsTestConn struct {
	columns []string
	rows [][]driver.Value
}
// Prepare is intentionally unsupported; the tests only use QueryContext.
func (c *rowsTestConn) Prepare(string) (driver.Stmt, error) {
	return nil, errors.New("not implemented")
}
func (c *rowsTestConn) Close() error { return nil }
// Begin is intentionally unsupported; these tests never open transactions.
func (c *rowsTestConn) Begin() (driver.Tx, error) { return nil, errors.New("not implemented") }
// QueryContext ignores the SQL text and args and returns a fresh cursor over
// a copy of the fixture rows.
func (c *rowsTestConn) QueryContext(_ context.Context, _ string, _ []driver.NamedValue) (driver.Rows, error) {
	return &rowsTestRows{columns: append([]string(nil), c.columns...), rows: cloneDriverRows(c.rows)}, nil
}
type rowsTestRows struct {
columns []string
rows [][]driver.Value
idx int
}
func (r *rowsTestRows) Columns() []string { return append([]string(nil), r.columns...) }
func (r *rowsTestRows) Close() error { return nil }
func (r *rowsTestRows) Next(dest []driver.Value) error {
if r.idx >= len(r.rows) {
return io.EOF
}
copy(dest, r.rows[r.idx])
r.idx++
return nil
}

View File

@@ -35,6 +35,9 @@ type PollSource interface {
//
// Run should block, producing events into `out` until ctx is cancelled or a fatal error occurs.
// It MUST NOT close out (the scheduler/daemon owns the bus).
//
// Stream sources can classify exits by wrapping errors with StreamRetryable or
// StreamFatal. Plain non-nil errors are treated as retryable by the scheduler.
type StreamSource interface {
Input
Run(ctx context.Context, out chan<- event.Event) error

63
sources/stream_errors.go Normal file
View File

@@ -0,0 +1,63 @@
package sources
import "errors"
type streamRetryableError struct {
err error
}
func (e *streamRetryableError) Error() string {
if e.err == nil {
return "retryable stream error"
}
return e.err.Error()
}
func (e *streamRetryableError) Unwrap() error { return e.err }
type streamFatalError struct {
err error
}
func (e *streamFatalError) Error() string {
if e.err == nil {
return "fatal stream error"
}
return e.err.Error()
}
func (e *streamFatalError) Unwrap() error { return e.err }
// StreamRetryable marks a stream-source exit as retryable.
func StreamRetryable(err error) error {
if err == nil {
return nil
}
return &streamRetryableError{err: err}
}
// StreamFatal marks a stream-source exit as fatal.
func StreamFatal(err error) error {
if err == nil {
return nil
}
return &streamFatalError{err: err}
}
// IsStreamRetryable reports whether err contains a retryable stream marker.
func IsStreamRetryable(err error) bool {
if err == nil {
return false
}
var target *streamRetryableError
return errors.As(err, &target)
}
// IsStreamFatal reports whether err contains a fatal stream marker.
func IsStreamFatal(err error) bool {
if err == nil {
return false
}
var target *streamFatalError
return errors.As(err, &target)
}

View File

@@ -0,0 +1,52 @@
package sources
import (
"errors"
"fmt"
"testing"
)
// TestStreamRetryableWrapsThroughErrorChains verifies the retryable marker
// survives further %w wrapping and preserves errors.Is on the base cause.
func TestStreamRetryableWrapsThroughErrorChains(t *testing.T) {
	cause := errors.New("retry me")
	wrapped := fmt.Errorf("outer: %w", StreamRetryable(cause))
	switch {
	case !IsStreamRetryable(wrapped):
		t.Fatalf("IsStreamRetryable() = false, want true")
	case IsStreamFatal(wrapped):
		t.Fatalf("IsStreamFatal() = true, want false")
	case !errors.Is(wrapped, cause):
		t.Fatalf("errors.Is(err, base) = false, want true")
	}
}
// TestStreamFatalWrapsThroughErrorChains verifies the fatal marker survives
// further %w wrapping and preserves errors.Is on the base cause.
func TestStreamFatalWrapsThroughErrorChains(t *testing.T) {
	cause := errors.New("fatal")
	wrapped := fmt.Errorf("outer: %w", StreamFatal(cause))
	switch {
	case !IsStreamFatal(wrapped):
		t.Fatalf("IsStreamFatal() = false, want true")
	case IsStreamRetryable(wrapped):
		t.Fatalf("IsStreamRetryable() = true, want false")
	case !errors.Is(wrapped, cause):
		t.Fatalf("errors.Is(err, base) = false, want true")
	}
}
// TestStreamErrorHelpersNil verifies every helper treats nil as "no error".
func TestStreamErrorHelpersNil(t *testing.T) {
	if got := StreamRetryable(nil); got != nil {
		t.Fatalf("StreamRetryable(nil) != nil")
	}
	if got := StreamFatal(nil); got != nil {
		t.Fatalf("StreamFatal(nil) != nil")
	}
	if IsStreamRetryable(nil) {
		t.Fatalf("IsStreamRetryable(nil) = true")
	}
	if IsStreamFatal(nil) {
		t.Fatalf("IsStreamFatal(nil) = true")
	}
}