Compare commits
4 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
| 3ef93faf69 | |||
| 4910440756 | |||
| 3b92c2284d | |||
| 215afe1acf |
12
README.md
12
README.md
@@ -15,12 +15,12 @@ lifecycle, domain schemas, and domain-specific validation in your daemon.
|
||||
|
||||
## Conceptual pipeline
|
||||
|
||||
Collect -> Process (optional stages, including normalize) -> Route -> Emit
|
||||
Collect -> Process (optional stages, including dedupe + normalize) -> Route -> Emit
|
||||
|
||||
| Stage | Package(s) |
|
||||
|---|---|
|
||||
| Collect | `sources`, `scheduler` |
|
||||
| Process | `pipeline`, `processors`, `normalize` (optional stage) |
|
||||
| Process | `pipeline`, `processors`, `processors/dedupe`, `processors/normalize` (optional stages) |
|
||||
| Route | `dispatch` |
|
||||
| Emit | `sinks` |
|
||||
| Configure | `config` |
|
||||
@@ -86,7 +86,11 @@ Processors can transform, drop, or reject events.
|
||||
Defines the generic processor interface and a named-driver registry used by
|
||||
daemons to build ordered processor chains.
|
||||
|
||||
### `normalize`
|
||||
### `processors/dedupe`
|
||||
|
||||
Built-in in-memory LRU dedupe processor that drops repeated events by `Event.ID`.
|
||||
|
||||
### `processors/normalize`
|
||||
|
||||
Concrete normalization processor implementation. Typical use: sources emit raw
|
||||
payload events, then a normalize stage maps them to canonical schemas.
|
||||
@@ -100,7 +104,7 @@ Compiles routes and fans out events to sinks with per-sink queue/worker isolatio
|
||||
Defines sink interface and sink registry. Built-ins include:
|
||||
- `stdout`
|
||||
- `nats`
|
||||
- `postgres` (downstream registers table schema + event mapper; feedkit handles create-if-missing DDL, transactional inserts, and optional prune APIs)
|
||||
- `postgres`
|
||||
|
||||
Detailed Postgres configuration and wiring examples live in package docs:
|
||||
`sinks/doc.go`.
|
||||
|
||||
25
doc.go
25
doc.go
@@ -5,12 +5,12 @@
|
||||
//
|
||||
// Conceptual flow:
|
||||
//
|
||||
// Collect -> Process (optional stages, including normalize) -> Route -> Emit
|
||||
// Collect -> Process (optional stages, including dedupe + normalize) -> Route -> Emit
|
||||
//
|
||||
// In feedkit this maps to:
|
||||
//
|
||||
// Collect: sources + scheduler
|
||||
// Process: pipeline + processors + normalize (optional stage)
|
||||
// Process: pipeline + processors + processors/dedupe + processors/normalize (optional stages)
|
||||
// Route: dispatch
|
||||
// Emit: sinks
|
||||
// Config: config
|
||||
@@ -50,6 +50,19 @@
|
||||
// Both share Input{Name()}. A source may emit 0..N events per poll/run step,
|
||||
// and may emit multiple event kinds.
|
||||
//
|
||||
// For HTTP-backed polling sources, sources.NewHTTPSource provides a shared
|
||||
// helper for generic params:
|
||||
//
|
||||
// - params.url
|
||||
//
|
||||
// - params.user_agent
|
||||
//
|
||||
// - params.conditional (optional, default true)
|
||||
//
|
||||
// When conditional polling is enabled, feedkit opportunistically uses ETag
|
||||
// and Last-Modified validators. A 304 Not Modified response is treated as a
|
||||
// successful poll that emits no events.
|
||||
//
|
||||
// - scheduler
|
||||
// Runs one goroutine per job:
|
||||
//
|
||||
@@ -64,7 +77,10 @@
|
||||
// - processors
|
||||
// Generic processor interface and named factory registry for wiring chains.
|
||||
//
|
||||
// - normalize
|
||||
// - processors/dedupe
|
||||
// Built-in in-memory LRU dedupe processor keyed by Event.ID.
|
||||
//
|
||||
// - processors/normalize
|
||||
// Concrete pipeline processor for raw->canonical mapping.
|
||||
// If no normalizer matches, the event passes through unchanged by default.
|
||||
//
|
||||
@@ -75,7 +91,8 @@
|
||||
// Sink abstractions + sink registry.
|
||||
// Built-ins include stdout, NATS, and Postgres. For Postgres, downstream
|
||||
// code registers table schemas/mappers while feedkit manages DDL, writes,
|
||||
// and optional prune helpers.
|
||||
// optional automatic retention pruning (via sink params.prune), and
|
||||
// manual prune helpers. Postgres table schemas must declare PruneColumn.
|
||||
//
|
||||
// Typical wiring (daemon main.go)
|
||||
//
|
||||
|
||||
@@ -1,5 +0,0 @@
|
||||
package pipeline
|
||||
|
||||
// Placeholder for dedupe processor:
|
||||
// - key by Event.ID or computed key
|
||||
// - in-memory store first; later optional Postgres-backed
|
||||
28
processors/dedupe/doc.go
Normal file
28
processors/dedupe/doc.go
Normal file
@@ -0,0 +1,28 @@
|
||||
// Package dedupe provides a default in-memory LRU deduplication processor.
|
||||
//
|
||||
// The processor keys strictly by event.Event.ID:
|
||||
// - first-seen IDs pass through
|
||||
// - repeated IDs are dropped
|
||||
//
|
||||
// The in-memory seen-ID set is bounded by a required maxEntries capacity.
|
||||
// When capacity is exceeded, the least recently used ID is evicted.
|
||||
//
|
||||
// Typical registry wiring:
|
||||
//
|
||||
// ```go
|
||||
// reg := processors.NewRegistry()
|
||||
// reg.Register("dedupe", dedupe.Factory(10_000))
|
||||
//
|
||||
// reg.Register("normalize", func() (processors.Processor, error) {
|
||||
// return normalize.NewProcessor(myNormalizers, false), nil
|
||||
// })
|
||||
//
|
||||
// chain, err := reg.BuildChain([]string{"dedupe", "normalize"})
|
||||
//
|
||||
// if err != nil {
|
||||
// // handle wiring error
|
||||
// }
|
||||
//
|
||||
// p := &pipeline.Pipeline{Processors: chain}
|
||||
// ```
|
||||
package dedupe
|
||||
89
processors/dedupe/processor.go
Normal file
89
processors/dedupe/processor.go
Normal file
@@ -0,0 +1,89 @@
|
||||
package dedupe

import (
	"container/list"
	"context"
	"fmt"
	"strings"
	"sync"

	"gitea.maximumdirect.net/ejr/feedkit/event"
	"gitea.maximumdirect.net/ejr/feedkit/processors"
)

// Processor drops duplicate events by Event.ID using an in-memory LRU.
type Processor struct {
	maxEntries int

	mu    sync.Mutex
	order *list.List               // front = most recently seen, back = least recently seen
	byID  map[string]*list.Element // id -> list element whose Value is the id string
}

var _ processors.Processor = (*Processor)(nil)

// NewProcessor constructs a dedupe processor with a required max entry count.
func NewProcessor(maxEntries int) (*Processor, error) {
	if maxEntries <= 0 {
		return nil, fmt.Errorf("dedupe: maxEntries must be > 0, got %d", maxEntries)
	}

	return &Processor{
		maxEntries: maxEntries,
		order:      list.New(),
		byID:       make(map[string]*list.Element, maxEntries),
	}, nil
}

// Factory returns a processors.Factory that constructs Processor instances.
func Factory(maxEntries int) processors.Factory {
	return func() (processors.Processor, error) {
		return NewProcessor(maxEntries)
	}
}

// Process implements processors.Processor. First-seen IDs pass through
// unchanged; repeated IDs are dropped (nil event, nil error). A blank ID is
// an error.
func (p *Processor) Process(_ context.Context, in event.Event) (*event.Event, error) {
	if p == nil {
		return nil, fmt.Errorf("dedupe: processor is nil")
	}
	if p.maxEntries <= 0 {
		return nil, fmt.Errorf("dedupe: processor maxEntries must be > 0")
	}

	id := strings.TrimSpace(in.ID)
	if id == "" {
		return nil, fmt.Errorf("dedupe: event ID is required")
	}

	p.mu.Lock()
	defer p.mu.Unlock()

	if p.order == nil || p.byID == nil {
		return nil, fmt.Errorf("dedupe: processor is not initialized")
	}

	if seen, ok := p.byID[id]; ok {
		// Duplicate: promote to most-recently-used and drop the event.
		p.order.MoveToFront(seen)
		return nil, nil
	}

	p.byID[id] = p.order.PushFront(id)
	if p.order.Len() > p.maxEntries {
		p.evictOldestLocked()
	}

	out := in
	return &out, nil
}

// evictOldestLocked removes the least-recently-used ID. Caller must hold p.mu.
func (p *Processor) evictOldestLocked() {
	tail := p.order.Back()
	if tail == nil {
		return
	}
	p.order.Remove(tail)
	if id, ok := tail.Value.(string); ok {
		delete(p.byID, id)
	}
}
|
||||
163
processors/dedupe/processor_test.go
Normal file
163
processors/dedupe/processor_test.go
Normal file
@@ -0,0 +1,163 @@
|
||||
package dedupe

import (
	"context"
	"strings"
	"testing"
	"time"

	"gitea.maximumdirect.net/ejr/feedkit/event"
	"gitea.maximumdirect.net/ejr/feedkit/processors"
)

func TestNewProcessorValidation(t *testing.T) {
	t.Run("rejects non-positive maxEntries", func(t *testing.T) {
		for _, n := range []int{0, -1} {
			proc, err := NewProcessor(n)
			if err == nil {
				t.Fatalf("expected error for maxEntries=%d, got nil", n)
			}
			if proc != nil {
				t.Fatalf("expected nil processor for maxEntries=%d", n)
			}
			if !strings.Contains(err.Error(), "maxEntries") {
				t.Fatalf("unexpected error: %v", err)
			}
		}
	})

	t.Run("accepts positive maxEntries", func(t *testing.T) {
		proc, err := NewProcessor(1)
		if err != nil {
			t.Fatalf("NewProcessor error: %v", err)
		}
		if proc == nil {
			t.Fatalf("expected processor, got nil")
		}
	})
}

func TestProcessorFirstSeenAndDuplicate(t *testing.T) {
	proc, err := NewProcessor(8)
	if err != nil {
		t.Fatalf("NewProcessor error: %v", err)
	}

	ctx := context.Background()
	first := testEvent("evt-1")

	got, err := proc.Process(ctx, first)
	if err != nil {
		t.Fatalf("Process first error: %v", err)
	}
	if got == nil {
		t.Fatalf("expected first event to pass through")
	}
	if got.ID != first.ID {
		t.Fatalf("expected unchanged ID %q, got %q", first.ID, got.ID)
	}

	got, err = proc.Process(ctx, first)
	if err != nil {
		t.Fatalf("Process duplicate error: %v", err)
	}
	if got != nil {
		t.Fatalf("expected duplicate to be dropped, got %#v", got)
	}

	got, err = proc.Process(ctx, testEvent("evt-2"))
	if err != nil {
		t.Fatalf("Process second unique error: %v", err)
	}
	if got == nil {
		t.Fatalf("expected second unique event to pass through")
	}
}

func TestProcessorLRUEvictionAndPromotion(t *testing.T) {
	proc, err := NewProcessor(2)
	if err != nil {
		t.Fatalf("NewProcessor error: %v", err)
	}

	ctx := context.Background()

	mustPass(t, proc, ctx, "a")
	mustPass(t, proc, ctx, "b")
	mustDrop(t, proc, ctx, "a") // promote "a" so "b" becomes least-recently-used
	mustPass(t, proc, ctx, "c") // evicts "b"
	mustDrop(t, proc, ctx, "a") // "a" should still be tracked after promotion
	mustPass(t, proc, ctx, "b") // "b" was evicted, so now it passes again
}

func TestProcessorRejectsBlankID(t *testing.T) {
	proc, err := NewProcessor(4)
	if err != nil {
		t.Fatalf("NewProcessor error: %v", err)
	}

	got, err := proc.Process(context.Background(), testEvent(" "))
	if err == nil {
		t.Fatalf("expected error for blank ID")
	}
	if got != nil {
		t.Fatalf("expected nil output on error, got %#v", got)
	}
	if !strings.Contains(err.Error(), "event ID is required") {
		t.Fatalf("unexpected error: %v", err)
	}
}

func TestFactoryWithRegistry(t *testing.T) {
	reg := processors.NewRegistry()
	reg.Register("dedupe", Factory(3))

	proc, err := reg.Build("dedupe")
	if err != nil {
		t.Fatalf("Build error: %v", err)
	}
	if proc == nil {
		t.Fatalf("expected processor, got nil")
	}

	got, err := proc.Process(context.Background(), testEvent("evt-factory-1"))
	if err != nil {
		t.Fatalf("Process error: %v", err)
	}
	if got == nil {
		t.Fatalf("expected first event to pass through")
	}
}

// mustPass asserts that id is emitted (first occurrence).
func mustPass(t *testing.T, p *Processor, ctx context.Context, id string) {
	t.Helper()
	got, err := p.Process(ctx, testEvent(id))
	if err != nil {
		t.Fatalf("expected pass for id=%q, got error: %v", id, err)
	}
	if got == nil {
		t.Fatalf("expected pass for id=%q, got drop", id)
	}
}

// mustDrop asserts that id is deduplicated (already seen).
func mustDrop(t *testing.T, p *Processor, ctx context.Context, id string) {
	t.Helper()
	got, err := p.Process(ctx, testEvent(id))
	if err != nil {
		t.Fatalf("expected drop for id=%q, got error: %v", id, err)
	}
	if got != nil {
		t.Fatalf("expected drop for id=%q, got output", id)
	}
}

// testEvent builds a minimal valid event with the given ID.
func testEvent(id string) event.Event {
	return event.Event{
		ID:        id,
		Kind:      event.Kind("observation"),
		Source:    "source-1",
		EmittedAt: time.Now().UTC(),
		Payload:   map[string]any{"ok": true},
	}
}
|
||||
@@ -9,11 +9,13 @@
|
||||
// Example:
|
||||
//
|
||||
// reg := processors.NewRegistry()
|
||||
// reg.Register("dedupe", dedupe.Factory(10_000))
|
||||
// reg.Register("normalize", func() (processors.Processor, error) {
|
||||
// // import "gitea.maximumdirect.net/ejr/feedkit/processors/normalize"
|
||||
// return normalize.NewProcessor(myNormalizers, false), nil
|
||||
// })
|
||||
//
|
||||
// chain, err := reg.BuildChain([]string{"normalize"})
|
||||
// chain, err := reg.BuildChain([]string{"dedupe", "normalize"})
|
||||
// if err != nil {
|
||||
// // handle wiring error
|
||||
// }
|
||||
|
||||
84
processors/normalize/helpers.go
Normal file
84
processors/normalize/helpers.go
Normal file
@@ -0,0 +1,84 @@
|
||||
package normalize

import (
	"encoding/json"
	"fmt"
	"time"

	"gitea.maximumdirect.net/ejr/feedkit/event"
)

// PayloadJSONBytes extracts a JSON payload into bytes suitable for json.Unmarshal.
//
// Supported payload shapes:
//   - json.RawMessage
//   - []byte
//   - string
//   - map[string]any
//
// Empty payloads of any supported shape are rejected.
func PayloadJSONBytes(e event.Event) ([]byte, error) {
	if e.Payload == nil {
		return nil, fmt.Errorf("payload is nil")
	}

	// Note: json.RawMessage must precede []byte — it matches first in the switch.
	switch payload := e.Payload.(type) {
	case json.RawMessage:
		if len(payload) == 0 {
			return nil, fmt.Errorf("payload is empty json.RawMessage")
		}
		return []byte(payload), nil
	case []byte:
		if len(payload) == 0 {
			return nil, fmt.Errorf("payload is empty []byte")
		}
		return payload, nil
	case string:
		if payload == "" {
			return nil, fmt.Errorf("payload is empty string")
		}
		return []byte(payload), nil
	case map[string]any:
		encoded, err := json.Marshal(payload)
		if err != nil {
			return nil, fmt.Errorf("marshal map payload: %w", err)
		}
		return encoded, nil
	}

	return nil, fmt.Errorf("unsupported payload type %T", e.Payload)
}

// DecodeJSONPayload extracts the event payload as bytes and unmarshals it into T.
func DecodeJSONPayload[T any](in event.Event) (T, error) {
	var zero T

	raw, err := PayloadJSONBytes(in)
	if err != nil {
		return zero, fmt.Errorf("extract payload: %w", err)
	}

	var decoded T
	if err := json.Unmarshal(raw, &decoded); err != nil {
		return zero, fmt.Errorf("decode raw payload: %w", err)
	}

	return decoded, nil
}

// FinalizeEvent builds the output event envelope by copying the input and applying
// the new schema/payload, plus optional EffectiveAt. A zero effectiveAt leaves
// the input's EffectiveAt untouched; a non-zero value is normalized to UTC.
// The result is validated before being returned.
func FinalizeEvent(in event.Event, outSchema string, outPayload any, effectiveAt time.Time) (*event.Event, error) {
	result := in
	result.Schema = outSchema
	result.Payload = outPayload

	if !effectiveAt.IsZero() {
		utc := effectiveAt.UTC()
		result.EffectiveAt = &utc
	}

	if err := result.Validate(); err != nil {
		return nil, err
	}

	return &result, nil
}
|
||||
118
processors/normalize/helpers_test.go
Normal file
118
processors/normalize/helpers_test.go
Normal file
@@ -0,0 +1,118 @@
|
||||
package normalize

import (
	"encoding/json"
	"strings"
	"testing"
	"time"

	"gitea.maximumdirect.net/ejr/feedkit/event"
)

func TestPayloadJSONBytesSupportedShapes(t *testing.T) {
	cases := []struct {
		name    string
		payload any
		want    string
	}{
		{name: "rawmessage", payload: json.RawMessage(`{"a":1}`), want: `{"a":1}`},
		{name: "bytes", payload: []byte(`{"a":2}`), want: `{"a":2}`},
		{name: "string", payload: `{"a":3}`, want: `{"a":3}`},
		{name: "map", payload: map[string]any{"a": 4}, want: `{"a":4}`},
	}

	for _, c := range cases {
		t.Run(c.name, func(t *testing.T) {
			got, err := PayloadJSONBytes(event.Event{Payload: c.payload})
			if err != nil {
				t.Fatalf("PayloadJSONBytes() unexpected error: %v", err)
			}
			if string(got) != c.want {
				t.Fatalf("PayloadJSONBytes() = %s, want %s", string(got), c.want)
			}
		})
	}
}

func TestPayloadJSONBytesRejectsInvalidPayloads(t *testing.T) {
	cases := []struct {
		name    string
		payload any
		want    string
	}{
		{name: "nil", payload: nil, want: "payload is nil"},
		{name: "empty rawmessage", payload: json.RawMessage{}, want: "payload is empty json.RawMessage"},
		{name: "empty bytes", payload: []byte{}, want: "payload is empty []byte"},
		{name: "empty string", payload: "", want: "payload is empty string"},
		{name: "unsupported", payload: 123, want: "unsupported payload type"},
	}

	for _, c := range cases {
		t.Run(c.name, func(t *testing.T) {
			_, err := PayloadJSONBytes(event.Event{Payload: c.payload})
			if err == nil {
				t.Fatalf("PayloadJSONBytes() expected error")
			}
			if !strings.Contains(err.Error(), c.want) {
				t.Fatalf("PayloadJSONBytes() error = %q, want substring %q", err, c.want)
			}
		})
	}
}

func TestDecodeJSONPayload(t *testing.T) {
	type payload struct {
		Name string `json:"name"`
	}

	decoded, err := DecodeJSONPayload[payload](event.Event{
		Payload: json.RawMessage(`{"name":"alice"}`),
	})
	if err != nil {
		t.Fatalf("DecodeJSONPayload() unexpected error: %v", err)
	}
	if decoded.Name != "alice" {
		t.Fatalf("DecodeJSONPayload() = %#v, want name alice", decoded)
	}
}

func TestFinalizeEventPreservesEnvelopeAndEffectiveAtBehavior(t *testing.T) {
	existingEffectiveAt := time.Date(2026, 3, 28, 11, 0, 0, 0, time.UTC)
	input := event.Event{
		ID:          "evt-1",
		Kind:        event.Kind("observation"),
		Source:      "source-a",
		EmittedAt:   time.Date(2026, 3, 28, 12, 0, 0, 0, time.UTC),
		EffectiveAt: &existingEffectiveAt,
		Schema:      "raw.example.v1",
		Payload:     map[string]any{"old": true},
	}

	// Zero effectiveAt: existing EffectiveAt is preserved.
	got, err := FinalizeEvent(input, "example.v1", map[string]any{"value": 1.234567}, time.Time{})
	if err != nil {
		t.Fatalf("FinalizeEvent() unexpected error: %v", err)
	}
	if got.ID != input.ID || got.Kind != input.Kind || got.Source != input.Source || got.EmittedAt != input.EmittedAt {
		t.Fatalf("FinalizeEvent() changed preserved envelope fields: %#v", got)
	}
	if got.EffectiveAt == nil || !got.EffectiveAt.Equal(existingEffectiveAt) {
		t.Fatalf("FinalizeEvent() effectiveAt = %#v, want preserved existing value", got.EffectiveAt)
	}

	// Non-zero effectiveAt: value is applied and normalized to UTC.
	nextEffectiveAt := time.Date(2026, 3, 28, 13, 0, 0, 0, time.FixedZone("x", -4*3600))
	got, err = FinalizeEvent(input, "example.v1", map[string]any{"value": 1.234567}, nextEffectiveAt)
	if err != nil {
		t.Fatalf("FinalizeEvent() unexpected overwrite error: %v", err)
	}
	if got.EffectiveAt == nil || !got.EffectiveAt.Equal(nextEffectiveAt.UTC()) {
		t.Fatalf("FinalizeEvent() effectiveAt = %#v, want %s", got.EffectiveAt, nextEffectiveAt.UTC())
	}

	payloadMap, ok := got.Payload.(map[string]any)
	if !ok {
		t.Fatalf("FinalizeEvent() payload type = %T, want map[string]any", got.Payload)
	}
	if payloadMap["value"] != 1.234567 {
		t.Fatalf("FinalizeEvent() payload value = %#v, want unrounded 1.234567", payloadMap["value"])
	}
}
|
||||
13
sinks/doc.go
13
sinks/doc.go
@@ -29,7 +29,7 @@
|
||||
// and feedkit ownership:
|
||||
// - downstream code registers table schema + event mapping functions
|
||||
// - feedkit manages DB connection, create-if-missing DDL, transactional
|
||||
// inserts, and prune helpers
|
||||
// inserts, optional automatic retention pruning, and manual prune helpers
|
||||
//
|
||||
// Example config:
|
||||
//
|
||||
@@ -40,6 +40,13 @@
|
||||
// uri: postgres://localhost:5432/feedkit?sslmode=disable
|
||||
// username: feedkit_user
|
||||
// password: feedkit_pass
|
||||
// prune: 3d # optional: prune rows older than now-3d on each write tx
|
||||
//
|
||||
// params.prune supports:
|
||||
// - Go duration strings (72h, 90m, 30s, ...)
|
||||
// - day/week suffixes (3d, 2w)
|
||||
//
|
||||
// If params.prune is omitted, automatic pruning is disabled.
|
||||
//
|
||||
// Example downstream wiring:
|
||||
//
|
||||
@@ -53,7 +60,7 @@
|
||||
// {Name: "payload_json", Type: "JSONB", Nullable: false},
|
||||
// },
|
||||
// PrimaryKey: []string{"event_id"},
|
||||
// PruneColumn: "emitted_at",
|
||||
// PruneColumn: "emitted_at", // required for retention pruning
|
||||
// },
|
||||
// },
|
||||
// MapEvent: func(ctx context.Context, e event.Event) ([]sinks.PostgresWrite, error) {
|
||||
@@ -74,7 +81,7 @@
|
||||
// },
|
||||
// })
|
||||
//
|
||||
// Pruning via type assertion:
|
||||
// Manual pruning via type assertion (administrative helpers):
|
||||
//
|
||||
// if p, ok := sink.(sinks.PostgresPruner); ok {
|
||||
// _, _ = p.PruneKeepLatest(ctx, "events", 10000)
|
||||
|
||||
27
sinks/helpers.go
Normal file
27
sinks/helpers.go
Normal file
@@ -0,0 +1,27 @@
|
||||
package sinks

import (
	"fmt"
	"strings"

	"gitea.maximumdirect.net/ejr/feedkit/config"
)

// RegisterPostgresSchemaForConfiguredSinks registers one Postgres schema for each
// configured sink using driver=postgres. Sinks with any other driver are
// skipped; a nil config is an error.
func RegisterPostgresSchemaForConfiguredSinks(cfg *config.Config, schema PostgresSchema) error {
	if cfg == nil {
		return fmt.Errorf("register postgres schemas: config is nil")
	}

	for idx, sinkCfg := range cfg.Sinks {
		// Driver comparison is whitespace- and case-insensitive.
		if !strings.EqualFold(strings.TrimSpace(sinkCfg.Driver), "postgres") {
			continue
		}
		err := RegisterPostgresSchema(sinkCfg.Name, schema)
		if err != nil {
			return fmt.Errorf("register postgres schema for sinks[%d] name=%q: %w", idx, sinkCfg.Name, err)
		}
	}

	return nil
}
|
||||
86
sinks/helpers_test.go
Normal file
86
sinks/helpers_test.go
Normal file
@@ -0,0 +1,86 @@
|
||||
package sinks

import (
	"context"
	"fmt"
	"strings"
	"testing"
	"time"

	"gitea.maximumdirect.net/ejr/feedkit/config"
	"gitea.maximumdirect.net/ejr/feedkit/event"
)

func TestRegisterPostgresSchemaForConfiguredSinksNilConfig(t *testing.T) {
	err := RegisterPostgresSchemaForConfiguredSinks(nil, testPostgresSchema())
	if err == nil {
		t.Fatalf("RegisterPostgresSchemaForConfiguredSinks(nil) expected error")
	}
	if !strings.Contains(err.Error(), "config is nil") {
		t.Fatalf("error = %q, want config is nil", err)
	}
}

func TestRegisterPostgresSchemaForConfiguredSinksNonPostgresNoOp(t *testing.T) {
	cfg := &config.Config{
		Sinks: []config.SinkConfig{
			{Name: uniqueSinkName("stdout"), Driver: "stdout"},
			{Name: uniqueSinkName("nats"), Driver: "nats"},
		},
	}

	if err := RegisterPostgresSchemaForConfiguredSinks(cfg, testPostgresSchema()); err != nil {
		t.Fatalf("RegisterPostgresSchemaForConfiguredSinks(non-postgres) error = %v", err)
	}
}

func TestRegisterPostgresSchemaForConfiguredSinksDuplicateRegistrationFails(t *testing.T) {
	cfg := &config.Config{
		Sinks: []config.SinkConfig{
			{Name: uniqueSinkName("pg"), Driver: "postgres"},
		},
	}

	if err := RegisterPostgresSchemaForConfiguredSinks(cfg, testPostgresSchema()); err != nil {
		t.Fatalf("first RegisterPostgresSchemaForConfiguredSinks() error = %v", err)
	}

	// Registering the same sink name twice must fail.
	err := RegisterPostgresSchemaForConfiguredSinks(cfg, testPostgresSchema())
	if err == nil {
		t.Fatalf("second RegisterPostgresSchemaForConfiguredSinks() expected duplicate error")
	}
	if !strings.Contains(err.Error(), "already registered") {
		t.Fatalf("error = %q, want already registered", err)
	}
}

// testPostgresSchema builds a minimal one-table schema with a prune column.
func testPostgresSchema() PostgresSchema {
	table := PostgresTable{
		Name: "events",
		Columns: []PostgresColumn{
			{Name: "event_id", Type: "TEXT", Nullable: false},
			{Name: "emitted_at", Type: "TIMESTAMPTZ", Nullable: false},
		},
		PrimaryKey:  []string{"event_id"},
		PruneColumn: "emitted_at",
	}

	return PostgresSchema{
		Tables: []PostgresTable{table},
		MapEvent: func(_ context.Context, e event.Event) ([]PostgresWrite, error) {
			write := PostgresWrite{
				Table: "events",
				Values: map[string]any{
					"event_id":   e.ID,
					"emitted_at": e.EmittedAt,
				},
			}
			return []PostgresWrite{write}, nil
		},
	}
}

// uniqueSinkName avoids cross-test collisions in the package-level registry.
func uniqueSinkName(prefix string) string {
	return fmt.Sprintf("%s_%d", prefix, time.Now().UnixNano())
}
|
||||
@@ -22,6 +22,10 @@ type postgresTx interface {
|
||||
Rollback() error
|
||||
}
|
||||
|
||||
type postgresExecer interface {
|
||||
ExecContext(ctx context.Context, query string, args ...any) (sql.Result, error)
|
||||
}
|
||||
|
||||
type postgresDB interface {
|
||||
PingContext(ctx context.Context) error
|
||||
BeginTx(ctx context.Context, opts *sql.TxOptions) (postgresTx, error)
|
||||
@@ -81,6 +85,7 @@ type PostgresSink struct {
|
||||
name string
|
||||
db postgresDB
|
||||
schema postgresSchemaCompiled
|
||||
pruneWindow time.Duration
|
||||
}
|
||||
|
||||
func NewPostgresSinkFromConfig(cfg config.SinkConfig) (Sink, error) {
|
||||
@@ -96,6 +101,10 @@ func NewPostgresSinkFromConfig(cfg config.SinkConfig) (Sink, error) {
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
pruneWindow, err := parsePostgresPruneWindow(cfg)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
schema, ok := lookupPostgresSchema(cfg.Name)
|
||||
if !ok {
|
||||
@@ -112,7 +121,7 @@ func NewPostgresSinkFromConfig(cfg config.SinkConfig) (Sink, error) {
|
||||
return nil, fmt.Errorf("postgres sink %q: open db: %w", cfg.Name, err)
|
||||
}
|
||||
|
||||
s := &PostgresSink{name: cfg.Name, db: db, schema: schema}
|
||||
s := &PostgresSink{name: cfg.Name, db: db, schema: schema, pruneWindow: pruneWindow}
|
||||
if err := s.initialize(); err != nil {
|
||||
_ = db.Close()
|
||||
return nil, err
|
||||
@@ -168,6 +177,15 @@ func (p *PostgresSink) Consume(ctx context.Context, e event.Event) error {
|
||||
return fmt.Errorf("postgres sink: insert into %q: %w", tbl.name, err)
|
||||
}
|
||||
}
|
||||
if p.pruneWindow > 0 {
|
||||
cutoff := time.Now().UTC().Add(-p.pruneWindow)
|
||||
for _, tableName := range p.schema.tableOrder {
|
||||
tbl := p.schema.tables[tableName]
|
||||
if _, err := execPruneOlderThan(ctx, tx, tbl, cutoff); err != nil {
|
||||
return fmt.Errorf("postgres sink: %w", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if err := tx.Commit(); err != nil {
|
||||
return fmt.Errorf("postgres sink: commit tx: %w", err)
|
||||
@@ -214,19 +232,9 @@ func (p *PostgresSink) PruneOlderThan(ctx context.Context, table string, cutoff
|
||||
return 0, err
|
||||
}
|
||||
|
||||
query := fmt.Sprintf(
|
||||
`DELETE FROM %s WHERE %s < $1`,
|
||||
quotePostgresIdent(tbl.name),
|
||||
quotePostgresIdent(tbl.pruneColumn),
|
||||
)
|
||||
|
||||
res, err := p.db.ExecContext(ctx, query, cutoff)
|
||||
rows, err := execPruneOlderThan(ctx, p.db, tbl, cutoff)
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf("postgres sink: prune older than table %q: %w", tbl.name, err)
|
||||
}
|
||||
rows, err := res.RowsAffected()
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf("postgres sink: prune older than table %q rows affected: %w", tbl.name, err)
|
||||
return 0, fmt.Errorf("postgres sink: %w", err)
|
||||
}
|
||||
return rows, nil
|
||||
}
|
||||
@@ -309,6 +317,77 @@ func buildPostgresDSN(uri, username, password string) (string, error) {
|
||||
return u.String(), nil
|
||||
}
|
||||
|
||||
func parsePostgresPruneWindow(cfg config.SinkConfig) (time.Duration, error) {
|
||||
raw, ok := cfg.Params["prune"]
|
||||
if !ok || raw == nil {
|
||||
return 0, nil
|
||||
}
|
||||
|
||||
s, ok := raw.(string)
|
||||
if !ok {
|
||||
return 0, fmt.Errorf("sink %q: params.prune must be a string duration (e.g. 72h, 3d, 2w)", cfg.Name)
|
||||
}
|
||||
|
||||
d, err := parsePostgresPruneDuration(s)
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf("sink %q: params.prune %q is invalid: %w", cfg.Name, s, err)
|
||||
}
|
||||
return d, nil
|
||||
}
|
||||
|
||||
func parsePostgresPruneDuration(raw string) (time.Duration, error) {
|
||||
s := strings.TrimSpace(raw)
|
||||
if s == "" {
|
||||
return 0, fmt.Errorf("must not be empty")
|
||||
}
|
||||
|
||||
lower := strings.ToLower(s)
|
||||
if strings.HasSuffix(lower, "d") || strings.HasSuffix(lower, "w") {
|
||||
unit := lower[len(lower)-1]
|
||||
n, err := strconv.Atoi(strings.TrimSpace(lower[:len(lower)-1]))
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf("must use a positive integer before %q", string(unit))
|
||||
}
|
||||
if n <= 0 {
|
||||
return 0, fmt.Errorf("must be > 0")
|
||||
}
|
||||
if unit == 'd' {
|
||||
return time.Duration(n) * 24 * time.Hour, nil
|
||||
}
|
||||
return time.Duration(n) * 7 * 24 * time.Hour, nil
|
||||
}
|
||||
|
||||
d, err := time.ParseDuration(s)
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf("must be a Go duration or use d/w suffixes")
|
||||
}
|
||||
if d <= 0 {
|
||||
return 0, fmt.Errorf("must be > 0")
|
||||
}
|
||||
return d, nil
|
||||
}
|
||||
|
||||
func buildPruneOlderThanSQL(tbl postgresTableCompiled) string {
|
||||
return fmt.Sprintf(
|
||||
`DELETE FROM %s WHERE %s < $1`,
|
||||
quotePostgresIdent(tbl.name),
|
||||
quotePostgresIdent(tbl.pruneColumn),
|
||||
)
|
||||
}
|
||||
|
||||
func execPruneOlderThan(ctx context.Context, execer postgresExecer, tbl postgresTableCompiled, cutoff time.Time) (int64, error) {
|
||||
query := buildPruneOlderThanSQL(tbl)
|
||||
res, err := execer.ExecContext(ctx, query, cutoff)
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf("prune older than table %q: %w", tbl.name, err)
|
||||
}
|
||||
rows, err := res.RowsAffected()
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf("prune older than table %q rows affected: %w", tbl.name, err)
|
||||
}
|
||||
return rows, nil
|
||||
}
|
||||
|
||||
func buildCreateTableSQL(tbl postgresTableCompiled) string {
|
||||
defs := make([]string, 0, len(tbl.columnOrder)+1)
|
||||
for _, colName := range tbl.columnOrder {
|
||||
|
||||
@@ -395,6 +395,94 @@ func TestNewPostgresSinkFromConfig_InitFailureClosesDB(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
// TestNewPostgresSinkFromConfig_PruneParamAccepted verifies that valid
// params.prune values (Go durations plus d/w suffixes) are parsed into the
// sink's pruneWindow.
func TestNewPostgresSinkFromConfig_PruneParamAccepted(t *testing.T) {
	tests := []struct {
		name string
		in   string
		want time.Duration
	}{
		{name: "go duration", in: "72h", want: 72 * time.Hour},
		{name: "days suffix", in: "3d", want: 72 * time.Hour},
		{name: "weeks suffix", in: "2w", want: 14 * 24 * time.Hour},
	}

	for _, tc := range tests {
		t.Run(tc.name, func(t *testing.T) {
			// Reset package-level registry/stub state for each subtest.
			withPostgresTestState(t)

			err := RegisterPostgresSchema("pg", schemaOneTable(func(_ context.Context, _ event.Event) ([]PostgresWrite, error) {
				return nil, nil
			}))
			if err != nil {
				t.Fatalf("register schema: %v", err)
			}

			// Stub the DB opener so no real connection is attempted.
			openPostgresDB = func(_ string) (postgresDB, error) {
				return &fakeDB{}, nil
			}

			s, err := NewPostgresSinkFromConfig(config.SinkConfig{
				Name:   "pg",
				Driver: "postgres",
				Params: map[string]any{
					"uri":      "postgres://localhost/db",
					"username": "user",
					"password": "pass",
					"prune":    tc.in,
				},
			})
			if err != nil {
				t.Fatalf("new postgres sink: %v", err)
			}

			pg, ok := s.(*PostgresSink)
			if !ok {
				t.Fatalf("expected *PostgresSink, got %T", s)
			}
			if pg.pruneWindow != tc.want {
				t.Fatalf("prune window = %s, want %s", pg.pruneWindow, tc.want)
			}
		})
	}
}
|
||||
|
||||
// TestNewPostgresSinkFromConfig_PruneParamRejected verifies that invalid
// params.prune values fail sink construction with an error that names the
// offending "params.prune" key.
func TestNewPostgresSinkFromConfig_PruneParamRejected(t *testing.T) {
	withPostgresTestState(t)

	tests := []struct {
		name string
		in   any
	}{
		{name: "empty", in: ""},
		{name: "zero", in: "0"},
		{name: "negative", in: "-1h"},
		{name: "malformed", in: "abc"},
		{name: "fractional day", in: "1.5d"},
		{name: "wrong type", in: 5},
	}

	for _, tc := range tests {
		t.Run(tc.name, func(t *testing.T) {
			_, err := NewPostgresSinkFromConfig(config.SinkConfig{
				Name:   "pg",
				Driver: "postgres",
				Params: map[string]any{
					"uri":      "postgres://localhost/db",
					"username": "user",
					"password": "pass",
					"prune":    tc.in,
				},
			})
			if err == nil {
				t.Fatalf("expected error")
			}
			// The error must point at the offending config key.
			if !strings.Contains(err.Error(), "params.prune") {
				t.Fatalf("expected params.prune error, got %v", err)
			}
		})
	}
}
|
||||
|
||||
func TestPostgresSinkConsume_InvalidEvent(t *testing.T) {
|
||||
db := &fakeDB{}
|
||||
called := 0
|
||||
@@ -497,6 +585,71 @@ func TestPostgresSinkConsume_InsertFailureRollsBack(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
// TestPostgresSinkConsume_AutoPruneRunsInSameTransaction verifies that when a
// pruneWindow is configured, Consume issues the prune DELETEs inside the same
// transaction as the inserts and commits exactly once.
func TestPostgresSinkConsume_AutoPruneRunsInSameTransaction(t *testing.T) {
	tx := &fakeTx{}
	db := &fakeDB{tx: tx}
	sink := &PostgresSink{
		name: "pg",
		db:   db,
		schema: mustCompileSchema(t, schemaTwoTables(func(_ context.Context, e event.Event) ([]PostgresWrite, error) {
			return []PostgresWrite{
				{Table: "events", Values: map[string]any{"event_id": e.ID, "emitted_at": e.EmittedAt}},
				{Table: "event_payloads", Values: map[string]any{"event_id": e.ID, "payload_json": `{}`, "emitted_at": e.EmittedAt}},
			}, nil
		})),
		pruneWindow: 24 * time.Hour,
	}

	if err := sink.Consume(context.Background(), validTestEvent()); err != nil {
		t.Fatalf("consume: %v", err)
	}
	// Expected statement order: insert, insert, prune(events), prune(event_payloads).
	if len(tx.execCalls) != 4 {
		t.Fatalf("expected 4 tx statements (2 inserts + 2 prunes), got %d", len(tx.execCalls))
	}
	if !strings.Contains(tx.execCalls[2].query, `DELETE FROM "events"`) {
		t.Fatalf("expected prune delete for events, got %s", tx.execCalls[2].query)
	}
	if !strings.Contains(tx.execCalls[3].query, `DELETE FROM "event_payloads"`) {
		t.Fatalf("expected prune delete for event_payloads, got %s", tx.execCalls[3].query)
	}
	if tx.commitCalls != 1 {
		t.Fatalf("expected one commit, got %d", tx.commitCalls)
	}
	if tx.rollbackCalls != 0 {
		t.Fatalf("expected zero rollbacks, got %d", tx.rollbackCalls)
	}
}
|
||||
|
||||
// TestPostgresSinkConsume_AutoPruneFailureRollsBack verifies that a failing
// prune DELETE aborts the whole Consume transaction: no commit, one rollback.
func TestPostgresSinkConsume_AutoPruneFailureRollsBack(t *testing.T) {
	// Fail the third tx statement, i.e. the first prune DELETE (after 2 inserts).
	tx := &fakeTx{execErrOnCall: 3, execErr: errors.New("prune failed")}
	db := &fakeDB{tx: tx}
	sink := &PostgresSink{
		name: "pg",
		db:   db,
		schema: mustCompileSchema(t, schemaTwoTables(func(_ context.Context, e event.Event) ([]PostgresWrite, error) {
			return []PostgresWrite{
				{Table: "events", Values: map[string]any{"event_id": e.ID, "emitted_at": e.EmittedAt}},
				{Table: "event_payloads", Values: map[string]any{"event_id": e.ID, "payload_json": `{}`, "emitted_at": e.EmittedAt}},
			}, nil
		})),
		pruneWindow: 24 * time.Hour,
	}

	err := sink.Consume(context.Background(), validTestEvent())
	if err == nil {
		t.Fatalf("expected prune error")
	}
	if !strings.Contains(err.Error(), "prune older than") {
		t.Fatalf("unexpected error: %v", err)
	}
	if tx.commitCalls != 0 {
		t.Fatalf("expected no commit")
	}
	if tx.rollbackCalls != 1 {
		t.Fatalf("expected rollback, got %d", tx.rollbackCalls)
	}
}
|
||||
|
||||
func TestPostgresSinkPrune_PerTable(t *testing.T) {
|
||||
db := &fakeDB{execRows: 7}
|
||||
sink := &PostgresSink{
|
||||
|
||||
@@ -11,4 +11,14 @@
|
||||
//
|
||||
// A single source may emit 0..N events per poll or stream iteration, and those
|
||||
// events may span multiple event kinds.
|
||||
//
|
||||
// HTTP-backed polling sources can share NewHTTPSource for generic HTTP config
|
||||
// parsing and conditional GET behavior. The helper understands:
|
||||
// - params.url
|
||||
// - params.user_agent
|
||||
// - params.conditional (optional, default true)
|
||||
//
|
||||
// When validators are available, NewHTTPSource prefers ETag/If-None-Match and
|
||||
// falls back to Last-Modified/If-Modified-Since. A 304 Not Modified response is
|
||||
// treated as a successful unchanged poll.
|
||||
package sources
|
||||
|
||||
145
sources/helpers.go
Normal file
145
sources/helpers.go
Normal file
@@ -0,0 +1,145 @@
|
||||
package sources
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"sort"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"gitea.maximumdirect.net/ejr/feedkit/config"
|
||||
"gitea.maximumdirect.net/ejr/feedkit/event"
|
||||
)
|
||||
|
||||
// DefaultEventID applies feedkit's default Event.ID policy:
|
||||
//
|
||||
// - If upstream provides an ID, use it (trimmed).
|
||||
// - Otherwise, ID is "<Source>:<EffectiveAt>" when available.
|
||||
// - If EffectiveAt is unavailable, fall back to "<Source>:<EmittedAt>".
|
||||
//
|
||||
// Timestamps are encoded as RFC3339Nano in UTC.
|
||||
func DefaultEventID(upstreamID, sourceName string, effectiveAt *time.Time, emittedAt time.Time) string {
|
||||
if id := strings.TrimSpace(upstreamID); id != "" {
|
||||
return id
|
||||
}
|
||||
|
||||
src := strings.TrimSpace(sourceName)
|
||||
if src == "" {
|
||||
src = "UNKNOWN_SOURCE"
|
||||
}
|
||||
|
||||
if effectiveAt != nil && !effectiveAt.IsZero() {
|
||||
return fmt.Sprintf("%s:%s", src, effectiveAt.UTC().Format(time.RFC3339Nano))
|
||||
}
|
||||
|
||||
t := emittedAt.UTC()
|
||||
if t.IsZero() {
|
||||
t = time.Now().UTC()
|
||||
}
|
||||
|
||||
return fmt.Sprintf("%s:%s", src, t.Format(time.RFC3339Nano))
|
||||
}
|
||||
|
||||
// SingleEvent constructs, validates, and returns a slice containing exactly one event.
|
||||
func SingleEvent(
|
||||
kind event.Kind,
|
||||
sourceName string,
|
||||
schema string,
|
||||
id string,
|
||||
emittedAt time.Time,
|
||||
effectiveAt *time.Time,
|
||||
payload any,
|
||||
) ([]event.Event, error) {
|
||||
if emittedAt.IsZero() {
|
||||
emittedAt = time.Now().UTC()
|
||||
} else {
|
||||
emittedAt = emittedAt.UTC()
|
||||
}
|
||||
|
||||
e := event.Event{
|
||||
ID: id,
|
||||
Kind: kind,
|
||||
Source: sourceName,
|
||||
EmittedAt: emittedAt,
|
||||
EffectiveAt: effectiveAt,
|
||||
Schema: schema,
|
||||
Payload: payload,
|
||||
}
|
||||
|
||||
if err := e.Validate(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return []event.Event{e}, nil
|
||||
}
|
||||
|
||||
// ValidateExpectedKinds checks that configured source expected kinds are a subset
|
||||
// of the kinds advertised by the built source, when the source exposes kind
|
||||
// metadata. If the source does not advertise kinds, the check is skipped.
|
||||
func ValidateExpectedKinds(cfg config.SourceConfig, in Input) error {
|
||||
expectedKinds, err := parseExpectedKinds(cfg.ExpectedKinds())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if len(expectedKinds) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
advertisedKinds := advertisedSourceKinds(in)
|
||||
if len(advertisedKinds) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
for kind := range expectedKinds {
|
||||
if !advertisedKinds[kind] {
|
||||
return fmt.Errorf(
|
||||
"configured expected kind %q not advertised by source (configured=%v advertised=%v)",
|
||||
kind,
|
||||
sortedKinds(expectedKinds),
|
||||
sortedKinds(advertisedKinds),
|
||||
)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func parseExpectedKinds(raw []string) (map[event.Kind]bool, error) {
|
||||
kinds := map[event.Kind]bool{}
|
||||
for i, k := range raw {
|
||||
kind, err := event.ParseKind(k)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("invalid expected kind at index %d (%q): %w", i, k, err)
|
||||
}
|
||||
kinds[kind] = true
|
||||
}
|
||||
return kinds, nil
|
||||
}
|
||||
|
||||
func advertisedSourceKinds(in Input) map[event.Kind]bool {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
kinds := map[event.Kind]bool{}
|
||||
if ks, ok := in.(KindsSource); ok {
|
||||
for _, kind := range ks.Kinds() {
|
||||
kinds[kind] = true
|
||||
}
|
||||
return kinds
|
||||
}
|
||||
|
||||
if ks, ok := in.(KindSource); ok {
|
||||
kinds[ks.Kind()] = true
|
||||
return kinds
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func sortedKinds(kindSet map[event.Kind]bool) []string {
|
||||
out := make([]string, 0, len(kindSet))
|
||||
for kind := range kindSet {
|
||||
out = append(out, string(kind))
|
||||
}
|
||||
sort.Strings(out)
|
||||
return out
|
||||
}
|
||||
131
sources/helpers_test.go
Normal file
131
sources/helpers_test.go
Normal file
@@ -0,0 +1,131 @@
|
||||
package sources
|
||||
|
||||
import (
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"gitea.maximumdirect.net/ejr/feedkit/config"
|
||||
"gitea.maximumdirect.net/ejr/feedkit/event"
|
||||
)
|
||||
|
||||
// testInput is a minimal Input implementation with no kind metadata.
type testInput struct {
	name string
}

func (s testInput) Name() string { return s.name }

// testKindSource advertises a single kind via the KindSource interface.
type testKindSource struct {
	testInput
	kind event.Kind
}

func (s testKindSource) Kind() event.Kind { return s.kind }

// testKindsSource advertises multiple kinds via the KindsSource interface.
type testKindsSource struct {
	testInput
	kinds []event.Kind
}

func (s testKindsSource) Kinds() []event.Kind { return s.kinds }
|
||||
|
||||
// TestValidateExpectedKindsLegacyKindFallback: the legacy single-`kind` config
// field validates against a single-kind source.
func TestValidateExpectedKindsLegacyKindFallback(t *testing.T) {
	cfg := config.SourceConfig{Kind: "observation"}
	in := testKindSource{
		testInput: testInput{name: "test"},
		kind:      event.Kind("observation"),
	}

	if err := ValidateExpectedKinds(cfg, in); err != nil {
		t.Fatalf("ValidateExpectedKinds() unexpected error: %v", err)
	}
}

// TestValidateExpectedKindsSubsetAllowed: configured kinds may be a strict
// subset of what the source advertises.
func TestValidateExpectedKindsSubsetAllowed(t *testing.T) {
	cfg := config.SourceConfig{Kinds: []string{"observation"}}
	in := testKindsSource{
		testInput: testInput{name: "test"},
		kinds:     []event.Kind{"observation", "forecast"},
	}

	if err := ValidateExpectedKinds(cfg, in); err != nil {
		t.Fatalf("ValidateExpectedKinds() unexpected error: %v", err)
	}
}

// TestValidateExpectedKindsMismatchFails: a configured kind the source does not
// advertise must produce a descriptive error.
func TestValidateExpectedKindsMismatchFails(t *testing.T) {
	cfg := config.SourceConfig{Kinds: []string{"alert"}}
	in := testKindsSource{
		testInput: testInput{name: "test"},
		kinds:     []event.Kind{"observation", "forecast"},
	}

	err := ValidateExpectedKinds(cfg, in)
	if err == nil {
		t.Fatalf("ValidateExpectedKinds() expected mismatch error, got nil")
	}
	if !strings.Contains(err.Error(), "configured expected kind") {
		t.Fatalf("ValidateExpectedKinds() error %q does not include expected message", err)
	}
}

// TestValidateExpectedKindsNoMetadataSkipsCheck: sources without kind metadata
// skip the subset check entirely.
func TestValidateExpectedKindsNoMetadataSkipsCheck(t *testing.T) {
	cfg := config.SourceConfig{Kinds: []string{"alert"}}
	in := testInput{name: "test"}

	if err := ValidateExpectedKinds(cfg, in); err != nil {
		t.Fatalf("ValidateExpectedKinds() unexpected error: %v", err)
	}
}
|
||||
|
||||
// TestDefaultEventIDUsesUpstreamID: a non-empty upstream ID wins and is trimmed.
func TestDefaultEventIDUsesUpstreamID(t *testing.T) {
	emittedAt := time.Date(2026, 3, 28, 15, 4, 5, 123, time.UTC)
	got := DefaultEventID(" upstream-id ", "source", nil, emittedAt)
	if got != "upstream-id" {
		t.Fatalf("DefaultEventID() = %q, want upstream-id", got)
	}
}

// TestDefaultEventIDPrefersEffectiveAt: EffectiveAt (normalized to UTC) is
// preferred over EmittedAt when present.
func TestDefaultEventIDPrefersEffectiveAt(t *testing.T) {
	effectiveAt := time.Date(2026, 3, 28, 16, 4, 5, 987654321, time.FixedZone("x", -6*3600))
	emittedAt := time.Date(2026, 3, 28, 15, 4, 5, 123, time.UTC)

	got := DefaultEventID("", "source", &effectiveAt, emittedAt)
	want := "source:" + effectiveAt.UTC().Format(time.RFC3339Nano)
	if got != want {
		t.Fatalf("DefaultEventID() = %q, want %q", got, want)
	}
}

// TestDefaultEventIDFallsBackToEmittedAt: without EffectiveAt, the ID is built
// from EmittedAt normalized to UTC.
func TestDefaultEventIDFallsBackToEmittedAt(t *testing.T) {
	emittedAt := time.Date(2026, 3, 28, 15, 4, 5, 123456789, time.FixedZone("y", 3*3600))
	got := DefaultEventID("", "source", nil, emittedAt)
	want := "source:" + emittedAt.UTC().Format(time.RFC3339Nano)
	if got != want {
		t.Fatalf("DefaultEventID() = %q, want %q", got, want)
	}
}
|
||||
|
||||
// TestSingleEventBuildsValidatedSlice: SingleEvent returns exactly one
// validated event with EmittedAt normalized to UTC.
func TestSingleEventBuildsValidatedSlice(t *testing.T) {
	effectiveAt := time.Date(2026, 3, 28, 16, 0, 0, 0, time.UTC)
	emittedAt := time.Date(2026, 3, 28, 15, 0, 0, 0, time.FixedZone("z", -5*3600))

	got, err := SingleEvent(
		event.Kind("observation"),
		"source-a",
		"raw.example.v1",
		"evt-1",
		emittedAt,
		&effectiveAt,
		map[string]any{"ok": true},
	)
	if err != nil {
		t.Fatalf("SingleEvent() unexpected error: %v", err)
	}
	if len(got) != 1 {
		t.Fatalf("SingleEvent() len = %d, want 1", len(got))
	}
	if got[0].EmittedAt != emittedAt.UTC() {
		t.Fatalf("SingleEvent() emittedAt = %s, want %s", got[0].EmittedAt, emittedAt.UTC())
	}
}
|
||||
147
sources/http.go
Normal file
147
sources/http.go
Normal file
@@ -0,0 +1,147 @@
|
||||
package sources
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
"gitea.maximumdirect.net/ejr/feedkit/config"
|
||||
"gitea.maximumdirect.net/ejr/feedkit/transport"
|
||||
)
|
||||
|
||||
// HTTPSource is a reusable helper for polling HTTP-backed sources.
//
// It centralizes generic source config parsing (`params.url`,
// `params.user_agent`, and optional `params.conditional`), default HTTP client
// setup, and conditional GET validator handling. Concrete daemon sources remain
// responsible for decoding the response body and constructing events.
type HTTPSource struct {
	Driver      string       // driver name; prefixes error messages
	Name        string       // source name from config (trimmed)
	URL         string       // params.url: endpoint polled on each fetch
	UserAgent   string       // params.user_agent: sent as the User-Agent header
	Accept      string       // Accept header value supplied by the daemon source
	Conditional bool         // params.conditional: conditional GET enabled (default true)
	Client      *http.Client // HTTP client; a default is built when nil

	mu         sync.Mutex               // guards validators across concurrent fetches
	validators transport.HTTPValidators // cached ETag/Last-Modified validators
}
|
||||
|
||||
// NewHTTPSource builds a generic HTTP polling helper from SourceConfig.
|
||||
//
|
||||
// Required params:
|
||||
// - params.url
|
||||
// - params.user_agent
|
||||
//
|
||||
// Optional params:
|
||||
// - params.conditional (default true): enable conditional GET using cached
|
||||
// ETag / Last-Modified validators
|
||||
func NewHTTPSource(driver string, cfg config.SourceConfig, accept string) (*HTTPSource, error) {
|
||||
name := strings.TrimSpace(cfg.Name)
|
||||
if name == "" {
|
||||
return nil, fmt.Errorf("%s: name is required", driver)
|
||||
}
|
||||
if cfg.Params == nil {
|
||||
return nil, fmt.Errorf("%s %q: params are required (need params.url and params.user_agent)", driver, cfg.Name)
|
||||
}
|
||||
|
||||
url, ok := cfg.ParamString("url", "URL")
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("%s %q: params.url is required", driver, cfg.Name)
|
||||
}
|
||||
|
||||
userAgent, ok := cfg.ParamString("user_agent", "userAgent")
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("%s %q: params.user_agent is required", driver, cfg.Name)
|
||||
}
|
||||
|
||||
conditional, err := parseConditionalParam(cfg)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &HTTPSource{
|
||||
Driver: driver,
|
||||
Name: name,
|
||||
URL: url,
|
||||
UserAgent: userAgent,
|
||||
Accept: accept,
|
||||
Conditional: conditional,
|
||||
Client: transport.NewHTTPClient(transport.DefaultHTTPTimeout),
|
||||
}, nil
|
||||
}
|
||||
|
||||
// FetchBytesIfChanged fetches the configured URL and reports whether the
|
||||
// upstream content changed. An unchanged 304 response returns changed=false
|
||||
// with no body and no error.
|
||||
func (s *HTTPSource) FetchBytesIfChanged(ctx context.Context) ([]byte, bool, error) {
|
||||
client := s.Client
|
||||
if client == nil {
|
||||
client = transport.NewHTTPClient(transport.DefaultHTTPTimeout)
|
||||
}
|
||||
|
||||
s.mu.Lock()
|
||||
validators := s.validators
|
||||
s.mu.Unlock()
|
||||
|
||||
body, changed, next, err := transport.FetchBodyIfChanged(
|
||||
ctx,
|
||||
client,
|
||||
s.URL,
|
||||
s.UserAgent,
|
||||
s.Accept,
|
||||
s.Conditional,
|
||||
validators,
|
||||
)
|
||||
if err != nil {
|
||||
return nil, false, fmt.Errorf("%s %q: %w", s.Driver, s.Name, err)
|
||||
}
|
||||
|
||||
if s.Conditional {
|
||||
s.mu.Lock()
|
||||
s.validators = next
|
||||
s.mu.Unlock()
|
||||
}
|
||||
|
||||
return body, changed, nil
|
||||
}
|
||||
|
||||
// FetchJSONIfChanged fetches the configured URL and returns the raw response
|
||||
// body as json.RawMessage when content changed. An unchanged 304 response
|
||||
// returns changed=false with a nil body and no error.
|
||||
func (s *HTTPSource) FetchJSONIfChanged(ctx context.Context) (json.RawMessage, bool, error) {
|
||||
body, changed, err := s.FetchBytesIfChanged(ctx)
|
||||
if err != nil || !changed {
|
||||
return nil, changed, err
|
||||
}
|
||||
return json.RawMessage(body), true, nil
|
||||
}
|
||||
|
||||
func parseConditionalParam(cfg config.SourceConfig) (bool, error) {
|
||||
raw, ok := cfg.Params["conditional"]
|
||||
if !ok || raw == nil {
|
||||
return true, nil
|
||||
}
|
||||
|
||||
switch v := raw.(type) {
|
||||
case bool:
|
||||
return v, nil
|
||||
case string:
|
||||
s := strings.TrimSpace(v)
|
||||
if s == "" {
|
||||
return false, fmt.Errorf("source %q: params.conditional must be a boolean", cfg.Name)
|
||||
}
|
||||
parsed, err := strconv.ParseBool(s)
|
||||
if err != nil {
|
||||
return false, fmt.Errorf("source %q: params.conditional must be a boolean", cfg.Name)
|
||||
}
|
||||
return parsed, nil
|
||||
default:
|
||||
return false, fmt.Errorf("source %q: params.conditional must be a boolean", cfg.Name)
|
||||
}
|
||||
}
|
||||
96
sources/http_test.go
Normal file
96
sources/http_test.go
Normal file
@@ -0,0 +1,96 @@
|
||||
package sources
|
||||
|
||||
import (
|
||||
"context"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"testing"
|
||||
|
||||
"gitea.maximumdirect.net/ejr/feedkit/config"
|
||||
)
|
||||
|
||||
// TestNewHTTPSourceConditionalDefaultsTrue: omitting params.conditional enables
// conditional GETs by default.
func TestNewHTTPSourceConditionalDefaultsTrue(t *testing.T) {
	src, err := NewHTTPSource("test_driver", config.SourceConfig{
		Name:   "test-source",
		Driver: "test_driver",
		Params: map[string]any{
			"url":        "https://example.invalid",
			"user_agent": "test-agent",
		},
	}, "application/json")
	if err != nil {
		t.Fatalf("NewHTTPSource() error = %v", err)
	}
	if !src.Conditional {
		t.Fatalf("Conditional = false, want true")
	}
}

// TestNewHTTPSourceRejectsInvalidConditional: a non-boolean params.conditional
// value is a construction error.
func TestNewHTTPSourceRejectsInvalidConditional(t *testing.T) {
	_, err := NewHTTPSource("test_driver", config.SourceConfig{
		Name:   "test-source",
		Driver: "test_driver",
		Params: map[string]any{
			"url":         "https://example.invalid",
			"user_agent":  "test-agent",
			"conditional": "sometimes",
		},
	}, "application/json")
	if err == nil {
		t.Fatalf("NewHTTPSource() error = nil, want error")
	}
}
|
||||
|
||||
func TestHTTPSourceFetchJSONIfChanged(t *testing.T) {
|
||||
var call int
|
||||
srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
call++
|
||||
switch call {
|
||||
case 1:
|
||||
w.Header().Set("ETag", `"v1"`)
|
||||
_, _ = w.Write([]byte(`{"ok":true}`))
|
||||
case 2:
|
||||
if got := r.Header.Get("If-None-Match"); got != `"v1"` {
|
||||
t.Fatalf("second request If-None-Match = %q", got)
|
||||
}
|
||||
w.WriteHeader(http.StatusNotModified)
|
||||
default:
|
||||
t.Fatalf("unexpected call count %d", call)
|
||||
}
|
||||
}))
|
||||
defer srv.Close()
|
||||
|
||||
src, err := NewHTTPSource("test_driver", config.SourceConfig{
|
||||
Name: "test-source",
|
||||
Driver: "test_driver",
|
||||
Params: map[string]any{
|
||||
"url": srv.URL,
|
||||
"user_agent": "test-agent",
|
||||
},
|
||||
}, "application/json")
|
||||
if err != nil {
|
||||
t.Fatalf("NewHTTPSource() error = %v", err)
|
||||
}
|
||||
|
||||
raw, changed, err := src.FetchJSONIfChanged(context.Background())
|
||||
if err != nil {
|
||||
t.Fatalf("first FetchJSONIfChanged() error = %v", err)
|
||||
}
|
||||
if !changed {
|
||||
t.Fatalf("first FetchJSONIfChanged() changed = false, want true")
|
||||
}
|
||||
if got := string(raw); got != `{"ok":true}` {
|
||||
t.Fatalf("first FetchJSONIfChanged() body = %q", got)
|
||||
}
|
||||
|
||||
raw, changed, err = src.FetchJSONIfChanged(context.Background())
|
||||
if err != nil {
|
||||
t.Fatalf("second FetchJSONIfChanged() error = %v", err)
|
||||
}
|
||||
if changed {
|
||||
t.Fatalf("second FetchJSONIfChanged() changed = true, want false")
|
||||
}
|
||||
if raw != nil {
|
||||
t.Fatalf("second FetchJSONIfChanged() body = %q, want nil", string(raw))
|
||||
}
|
||||
}
|
||||
@@ -6,6 +6,7 @@ import (
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
@@ -28,7 +29,80 @@ func NewHTTPClient(timeout time.Duration) *http.Client {
|
||||
}
|
||||
|
||||
func FetchBody(ctx context.Context, client *http.Client, url, userAgent, accept string) ([]byte, error) {
|
||||
req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil)
|
||||
res, err := doRequest(ctx, client, http.MethodGet, url, userAgent, accept, "", "")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer res.Body.Close()
|
||||
|
||||
if res.StatusCode < 200 || res.StatusCode >= 300 {
|
||||
return nil, fmt.Errorf("HTTP %s", res.Status)
|
||||
}
|
||||
|
||||
return readValidatedBody(res.Body)
|
||||
}
|
||||
|
||||
// HTTPValidators are cache validators learned from prior successful GET responses.
//
// ETag is preferred when present. LastModified is used as a fallback validator
// when ETag is unavailable.
type HTTPValidators struct {
	ETag         string // last seen ETag response header value, if any
	LastModified string // last seen Last-Modified response header value, if any
}
|
||||
|
||||
// FetchBodyIfChanged performs an HTTP GET and opportunistically uses conditional
|
||||
// request headers based on the provided validators.
|
||||
//
|
||||
// Behavior:
|
||||
// - if conditional is false, this behaves like a normal GET and leaves validators unchanged
|
||||
// - if validators.ETag is set, sends If-None-Match
|
||||
// - else if validators.LastModified is set, sends If-Modified-Since
|
||||
// - 304 Not Modified is treated as success with changed=false and no body
|
||||
// - 200 responses are treated as changed=true and still enforce the normal body checks
|
||||
//
|
||||
// Returned validators reflect any updates learned from the response headers.
|
||||
func FetchBodyIfChanged(
|
||||
ctx context.Context,
|
||||
client *http.Client,
|
||||
url, userAgent, accept string,
|
||||
conditional bool,
|
||||
validators HTTPValidators,
|
||||
) ([]byte, bool, HTTPValidators, error) {
|
||||
headerName, headerValue := conditionalHeader(conditional, validators)
|
||||
|
||||
res, err := doRequest(ctx, client, http.MethodGet, url, userAgent, accept, headerName, headerValue)
|
||||
if err != nil {
|
||||
return nil, false, validators, err
|
||||
}
|
||||
defer res.Body.Close()
|
||||
|
||||
switch res.StatusCode {
|
||||
case http.StatusNotModified:
|
||||
if conditional {
|
||||
validators = refreshValidators(validators, res.Header)
|
||||
}
|
||||
return nil, false, validators, nil
|
||||
default:
|
||||
if res.StatusCode < 200 || res.StatusCode >= 300 {
|
||||
return nil, false, validators, fmt.Errorf("HTTP %s", res.Status)
|
||||
}
|
||||
}
|
||||
|
||||
b, err := readValidatedBody(res.Body)
|
||||
if err != nil {
|
||||
return nil, false, validators, err
|
||||
}
|
||||
|
||||
if conditional {
|
||||
validators = replaceValidators(res.Header)
|
||||
}
|
||||
|
||||
return b, true, validators, nil
|
||||
}
|
||||
|
||||
func doRequest(ctx context.Context, client *http.Client, method, url, userAgent, accept, headerName, headerValue string) (*http.Response, error) {
|
||||
req, err := http.NewRequestWithContext(ctx, method, url, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -39,19 +113,46 @@ func FetchBody(ctx context.Context, client *http.Client, url, userAgent, accept
|
||||
if accept != "" {
|
||||
req.Header.Set("Accept", accept)
|
||||
}
|
||||
|
||||
res, err := client.Do(req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer res.Body.Close()
|
||||
|
||||
if res.StatusCode < 200 || res.StatusCode >= 300 {
|
||||
return nil, fmt.Errorf("HTTP %s", res.Status)
|
||||
if headerName != "" && headerValue != "" {
|
||||
req.Header.Set(headerName, headerValue)
|
||||
}
|
||||
|
||||
return client.Do(req)
|
||||
}
|
||||
|
||||
func conditionalHeader(enabled bool, validators HTTPValidators) (string, string) {
|
||||
if !enabled {
|
||||
return "", ""
|
||||
}
|
||||
if etag := strings.TrimSpace(validators.ETag); etag != "" {
|
||||
return "If-None-Match", etag
|
||||
}
|
||||
if lastModified := strings.TrimSpace(validators.LastModified); lastModified != "" {
|
||||
return "If-Modified-Since", lastModified
|
||||
}
|
||||
return "", ""
|
||||
}
|
||||
|
||||
func replaceValidators(header http.Header) HTTPValidators {
|
||||
return HTTPValidators{
|
||||
ETag: strings.TrimSpace(header.Get("ETag")),
|
||||
LastModified: strings.TrimSpace(header.Get("Last-Modified")),
|
||||
}
|
||||
}
|
||||
|
||||
func refreshValidators(current HTTPValidators, header http.Header) HTTPValidators {
|
||||
if etag := strings.TrimSpace(header.Get("ETag")); etag != "" {
|
||||
current.ETag = etag
|
||||
}
|
||||
if lastModified := strings.TrimSpace(header.Get("Last-Modified")); lastModified != "" {
|
||||
current.LastModified = lastModified
|
||||
}
|
||||
return current
|
||||
}
|
||||
|
||||
func readValidatedBody(r io.Reader) ([]byte, error) {
|
||||
// Read at most maxResponseBodyBytes + 1 so we can detect overflow.
|
||||
limited := io.LimitReader(res.Body, maxResponseBodyBytes+1)
|
||||
limited := io.LimitReader(r, maxResponseBodyBytes+1)
|
||||
|
||||
b, err := io.ReadAll(limited)
|
||||
if err != nil {
|
||||
|
||||
232
transport/http_test.go
Normal file
232
transport/http_test.go
Normal file
@@ -0,0 +1,232 @@
|
||||
package transport
|
||||
|
||||
import (
|
||||
"context"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestFetchBodyIfChangedPrefersETagAndTreats304AsUnchanged(t *testing.T) {
|
||||
t.Helper()
|
||||
|
||||
var call int
|
||||
srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
call++
|
||||
switch call {
|
||||
case 1:
|
||||
if got := r.Header.Get("If-None-Match"); got != "" {
|
||||
t.Fatalf("first request If-None-Match = %q, want empty", got)
|
||||
}
|
||||
if got := r.Header.Get("If-Modified-Since"); got != "" {
|
||||
t.Fatalf("first request If-Modified-Since = %q, want empty", got)
|
||||
}
|
||||
w.Header().Set("ETag", `"v1"`)
|
||||
w.Header().Set("Last-Modified", "Mon, 02 Jan 2006 15:04:05 GMT")
|
||||
_, _ = w.Write([]byte(`{"ok":true}`))
|
||||
case 2:
|
||||
if got := r.Header.Get("If-None-Match"); got != `"v1"` {
|
||||
t.Fatalf("second request If-None-Match = %q, want %q", got, `"v1"`)
|
||||
}
|
||||
if got := r.Header.Get("If-Modified-Since"); got != "" {
|
||||
t.Fatalf("second request If-Modified-Since = %q, want empty when ETag is cached", got)
|
||||
}
|
||||
w.WriteHeader(http.StatusNotModified)
|
||||
default:
|
||||
t.Fatalf("unexpected call count %d", call)
|
||||
}
|
||||
}))
|
||||
defer srv.Close()
|
||||
|
||||
validators := HTTPValidators{}
|
||||
body, changed, next, err := FetchBodyIfChanged(context.Background(), srv.Client(), srv.URL, "test-agent", "application/json", true, validators)
|
||||
if err != nil {
|
||||
t.Fatalf("first FetchBodyIfChanged() error = %v", err)
|
||||
}
|
||||
if !changed {
|
||||
t.Fatalf("first FetchBodyIfChanged() changed = false, want true")
|
||||
}
|
||||
if got := string(body); got != `{"ok":true}` {
|
||||
t.Fatalf("first FetchBodyIfChanged() body = %q", got)
|
||||
}
|
||||
if got := next.ETag; got != `"v1"` {
|
||||
t.Fatalf("cached ETag = %q, want %q", got, `"v1"`)
|
||||
}
|
||||
if got := next.LastModified; got != "Mon, 02 Jan 2006 15:04:05 GMT" {
|
||||
t.Fatalf("cached Last-Modified = %q", got)
|
||||
}
|
||||
|
||||
body, changed, next, err = FetchBodyIfChanged(context.Background(), srv.Client(), srv.URL, "test-agent", "application/json", true, next)
|
||||
if err != nil {
|
||||
t.Fatalf("second FetchBodyIfChanged() error = %v", err)
|
||||
}
|
||||
if changed {
|
||||
t.Fatalf("second FetchBodyIfChanged() changed = true, want false")
|
||||
}
|
||||
if body != nil {
|
||||
t.Fatalf("second FetchBodyIfChanged() body = %q, want nil", string(body))
|
||||
}
|
||||
if got := next.ETag; got != `"v1"` {
|
||||
t.Fatalf("cached ETag after 304 = %q, want %q", got, `"v1"`)
|
||||
}
|
||||
}
|
||||
|
||||
func TestFetchBodyIfChangedFallsBackToIfModifiedSince(t *testing.T) {
|
||||
t.Helper()
|
||||
|
||||
var call int
|
||||
srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
call++
|
||||
switch call {
|
||||
case 1:
|
||||
w.Header().Set("Last-Modified", "Tue, 03 Jan 2006 15:04:05 GMT")
|
||||
_, _ = w.Write([]byte(`first`))
|
||||
case 2:
|
||||
if got := r.Header.Get("If-None-Match"); got != "" {
|
||||
t.Fatalf("second request If-None-Match = %q, want empty", got)
|
||||
}
|
||||
if got := r.Header.Get("If-Modified-Since"); got != "Tue, 03 Jan 2006 15:04:05 GMT" {
|
||||
t.Fatalf("second request If-Modified-Since = %q", got)
|
||||
}
|
||||
w.WriteHeader(http.StatusNotModified)
|
||||
default:
|
||||
t.Fatalf("unexpected call count %d", call)
|
||||
}
|
||||
}))
|
||||
defer srv.Close()
|
||||
|
||||
_, changed, validators, err := FetchBodyIfChanged(context.Background(), srv.Client(), srv.URL, "test-agent", "", true, HTTPValidators{})
|
||||
if err != nil {
|
||||
t.Fatalf("first FetchBodyIfChanged() error = %v", err)
|
||||
}
|
||||
if !changed {
|
||||
t.Fatalf("first FetchBodyIfChanged() changed = false, want true")
|
||||
}
|
||||
if got := validators.LastModified; got != "Tue, 03 Jan 2006 15:04:05 GMT" {
|
||||
t.Fatalf("cached Last-Modified = %q", got)
|
||||
}
|
||||
|
||||
_, changed, _, err = FetchBodyIfChanged(context.Background(), srv.Client(), srv.URL, "test-agent", "", true, validators)
|
||||
if err != nil {
|
||||
t.Fatalf("second FetchBodyIfChanged() error = %v", err)
|
||||
}
|
||||
if changed {
|
||||
t.Fatalf("second FetchBodyIfChanged() changed = true, want false")
|
||||
}
|
||||
}
|
||||
|
||||
func TestFetchBodyIfChangedClearsValidatorsOn200WithoutValidators(t *testing.T) {
|
||||
t.Helper()
|
||||
|
||||
var call int
|
||||
srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
call++
|
||||
switch call {
|
||||
case 1:
|
||||
w.Header().Set("ETag", `"v1"`)
|
||||
_, _ = w.Write([]byte(`first`))
|
||||
case 2:
|
||||
if got := r.Header.Get("If-None-Match"); got != `"v1"` {
|
||||
t.Fatalf("second request If-None-Match = %q", got)
|
||||
}
|
||||
_, _ = w.Write([]byte(`second`))
|
||||
case 3:
|
||||
if got := r.Header.Get("If-None-Match"); got != "" {
|
||||
t.Fatalf("third request If-None-Match = %q, want empty", got)
|
||||
}
|
||||
if got := r.Header.Get("If-Modified-Since"); got != "" {
|
||||
t.Fatalf("third request If-Modified-Since = %q, want empty", got)
|
||||
}
|
||||
_, _ = w.Write([]byte(`third`))
|
||||
default:
|
||||
t.Fatalf("unexpected call count %d", call)
|
||||
}
|
||||
}))
|
||||
defer srv.Close()
|
||||
|
||||
_, _, validators, err := FetchBodyIfChanged(context.Background(), srv.Client(), srv.URL, "test-agent", "", true, HTTPValidators{})
|
||||
if err != nil {
|
||||
t.Fatalf("first FetchBodyIfChanged() error = %v", err)
|
||||
}
|
||||
_, _, validators, err = FetchBodyIfChanged(context.Background(), srv.Client(), srv.URL, "test-agent", "", true, validators)
|
||||
if err != nil {
|
||||
t.Fatalf("second FetchBodyIfChanged() error = %v", err)
|
||||
}
|
||||
if validators.ETag != "" || validators.LastModified != "" {
|
||||
t.Fatalf("validators after 200 without validators = %+v, want cleared", validators)
|
||||
}
|
||||
_, _, _, err = FetchBodyIfChanged(context.Background(), srv.Client(), srv.URL, "test-agent", "", true, validators)
|
||||
if err != nil {
|
||||
t.Fatalf("third FetchBodyIfChanged() error = %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestFetchBodyIfChangedConditionalDisabledSkipsConditionalHeaders(t *testing.T) {
|
||||
t.Helper()
|
||||
|
||||
var calls int
|
||||
srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
calls++
|
||||
if got := r.Header.Get("If-None-Match"); got != "" {
|
||||
t.Fatalf("request If-None-Match = %q, want empty", got)
|
||||
}
|
||||
if got := r.Header.Get("If-Modified-Since"); got != "" {
|
||||
t.Fatalf("request If-Modified-Since = %q, want empty", got)
|
||||
}
|
||||
_, _ = w.Write([]byte(`body`))
|
||||
}))
|
||||
defer srv.Close()
|
||||
|
||||
validators := HTTPValidators{ETag: `"v1"`, LastModified: "Wed, 04 Jan 2006 15:04:05 GMT"}
|
||||
_, changed, next, err := FetchBodyIfChanged(context.Background(), srv.Client(), srv.URL, "test-agent", "", false, validators)
|
||||
if err != nil {
|
||||
t.Fatalf("FetchBodyIfChanged() error = %v", err)
|
||||
}
|
||||
if !changed {
|
||||
t.Fatalf("FetchBodyIfChanged() changed = false, want true")
|
||||
}
|
||||
if next != validators {
|
||||
t.Fatalf("validators changed when conditional disabled: got %+v want %+v", next, validators)
|
||||
}
|
||||
if calls != 1 {
|
||||
t.Fatalf("calls = %d, want 1", calls)
|
||||
}
|
||||
}
|
||||
|
||||
func TestFetchBodyIfChangedAllowsEmpty304ButRejectsEmpty200(t *testing.T) {
|
||||
t.Helper()
|
||||
|
||||
notModified := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
w.WriteHeader(http.StatusNotModified)
|
||||
}))
|
||||
defer notModified.Close()
|
||||
|
||||
_, changed, _, err := FetchBodyIfChanged(
|
||||
context.Background(),
|
||||
notModified.Client(),
|
||||
notModified.URL,
|
||||
"test-agent",
|
||||
"",
|
||||
true,
|
||||
HTTPValidators{ETag: `"v1"`},
|
||||
)
|
||||
if err != nil {
|
||||
t.Fatalf("304 FetchBodyIfChanged() error = %v", err)
|
||||
}
|
||||
if changed {
|
||||
t.Fatalf("304 FetchBodyIfChanged() changed = true, want false")
|
||||
}
|
||||
|
||||
emptyBody := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
w.WriteHeader(http.StatusOK)
|
||||
}))
|
||||
defer emptyBody.Close()
|
||||
|
||||
_, _, _, err = FetchBodyIfChanged(context.Background(), emptyBody.Client(), emptyBody.URL, "test-agent", "", true, HTTPValidators{})
|
||||
if err == nil {
|
||||
t.Fatalf("empty 200 FetchBodyIfChanged() error = nil, want error")
|
||||
}
|
||||
if err.Error() != "empty response body" {
|
||||
t.Fatalf("empty 200 FetchBodyIfChanged() error = %q", err)
|
||||
}
|
||||
}
|
||||
Reference in New Issue
Block a user