Skip to content

Track last scraped time per metric #227

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged
merged 1 commit into from
May 5, 2025
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
30 changes: 14 additions & 16 deletions collector/collector.go
Original file line number Diff line number Diff line change
Expand Up @@ -91,8 +91,10 @@ func NewExporter(logger *slog.Logger, cfg *Config) (*Exporter, error) {
Name: "dbtype",
Help: "Type of database the exporter is connected to (0=non-CDB, 1=CDB, >1=PDB).",
}),
logger: logger,
config: cfg,
logger: logger,
config: cfg,
lastScraped: map[string]*time.Time{},
scrapeInterval: cfg.ScrapeInterval,
}
e.metricsToScrape = e.DefaultMetrics()
err := e.connect()
Expand Down Expand Up @@ -131,7 +133,7 @@ func (e *Exporter) Describe(ch chan<- *prometheus.Desc) {
func (e *Exporter) Collect(ch chan<- prometheus.Metric) {
// if the user is running scheduled scrapes, we should only scrape new data
// on the interval
if e.scrapeInterval != nil && *e.scrapeInterval != 0 {
if e.scrapeInterval != 0 {
// read access must be checked
e.mu.Lock()
for _, r := range e.scrapeResults {
Expand All @@ -155,12 +157,10 @@ func (e *Exporter) Collect(ch chan<- prometheus.Metric) {

// RunScheduledScrapes is only relevant for users of this package that want to set the scrape on a timer
// rather than letting it be per Collect call
func (e *Exporter) RunScheduledScrapes(ctx context.Context, si time.Duration) {
e.scrapeInterval = &si

func (e *Exporter) RunScheduledScrapes(ctx context.Context) {
e.doScrape(time.Now())

ticker := time.NewTicker(si)
ticker := time.NewTicker(e.scrapeInterval)
defer ticker.Stop()

for {
Expand All @@ -177,7 +177,6 @@ func (e *Exporter) RunScheduledScrapes(ctx context.Context, si time.Duration) {
func (e *Exporter) doScrape(tick time.Time) {
e.mu.Lock() // ensure no simultaneous scrapes
e.scheduledScrape(&tick)
e.lastTick = &tick
e.mu.Unlock()
}

Expand Down Expand Up @@ -246,7 +245,9 @@ func (e *Exporter) scrape(ch chan<- prometheus.Metric, tick *time.Time) {

for _, metric := range e.metricsToScrape.Metric {
metric := metric //https://golang.org/doc/faq#closures_and_goroutines

if !e.isScrapeMetric(tick, metric) {
continue
}
go func() {
e.logger.Debug("About to scrape metric",
"Context", metric.Context,
Expand Down Expand Up @@ -476,13 +477,10 @@ func (e *Exporter) reloadMetrics() {
// ScrapeMetric is an interface method to call scrapeGenericValues using Metric struct values
func (e *Exporter) ScrapeMetric(db *sql.DB, ch chan<- prometheus.Metric, m Metric, tick *time.Time) error {
e.logger.Debug("Calling function ScrapeGenericValues()")
if e.isScrapeMetric(tick, m) {
queryTimeout := e.getQueryTimeout(m)
return e.scrapeGenericValues(db, ch, m.Context, m.Labels, m.MetricsDesc,
m.MetricsType, m.MetricsBuckets, m.FieldToAppend, m.IgnoreZeroResult,
m.Request, queryTimeout)
}
return nil
queryTimeout := e.getQueryTimeout(m)
return e.scrapeGenericValues(db, ch, m.Context, m.Labels, m.MetricsDesc,
m.MetricsType, m.MetricsBuckets, m.FieldToAppend, m.IgnoreZeroResult,
m.Request, queryTimeout)
}

// generic method for retrieving metrics.
Expand Down
17 changes: 12 additions & 5 deletions collector/metrics.go
Original file line number Diff line number Diff line change
Expand Up @@ -13,18 +13,25 @@ import (
// and the time since the last scrape is less than the custom scrape interval.
// If there is no tick time or last known tick, the metric is always scraped.
func (e *Exporter) isScrapeMetric(tick *time.Time, metric Metric) bool {
// Always scrape the metric if we don't have a current or last known tick.
if tick == nil || e.lastTick == nil {
// Always scrape the metric if we don't have a current tick.
if tick == nil {
return true
}
// If the metric doesn't have a custom scrape interval, scrape it.
interval, ok := e.getScrapeInterval(metric.Context, metric.ScrapeInterval)
if !ok {
return true
}
// If the metric's scrape interval is less than the time elapsed since the last scrape,
// we should scrape the metric.
return interval < tick.Sub(*e.lastTick)
id := metric.id()
lastScraped := e.lastScraped[id]
shouldScrape := lastScraped == nil ||
// If the metric's scrape interval is less than the time elapsed since the last scrape,
// we should scrape the metric.
interval < tick.Sub(*lastScraped)
if shouldScrape {
e.lastScraped[id] = tick
}
return shouldScrape
}

func (e *Exporter) getScrapeInterval(context, scrapeInterval string) (time.Duration, bool) {
Expand Down
15 changes: 13 additions & 2 deletions collector/types.go
Original file line number Diff line number Diff line change
Expand Up @@ -7,6 +7,7 @@ import (
"github.com/godror/godror/dsn"
"github.com/prometheus/client_golang/prometheus"
"log/slog"
"strings"
"sync"
"time"
)
Expand All @@ -16,7 +17,7 @@ type Exporter struct {
config *Config
mu *sync.Mutex
metricsToScrape Metrics
scrapeInterval *time.Duration
scrapeInterval time.Duration
user string
password string
connectString string
Expand All @@ -31,7 +32,7 @@ type Exporter struct {
dbtypeGauge prometheus.Gauge
db *sql.DB
logger *slog.Logger
lastTick *time.Time
lastScraped map[string]*time.Time
}

type Config struct {
Expand All @@ -49,6 +50,7 @@ type Config struct {
CustomMetrics string
QueryTimeout int
DefaultMetricsFile string
ScrapeInterval time.Duration
}

// Metric is an object description
Expand All @@ -69,3 +71,12 @@ type Metric struct {
type Metrics struct {
Metric []Metric
}

// id derives the cache key used to track when this metric was last
// scraped: the metric's context concatenated with each of its metric
// descriptions.
//
// NOTE(review): if MetricsDesc is a map, range order is randomized per
// iteration, so the resulting id could differ between calls for the
// same metric — confirm MetricsDesc's declared type and sort the
// descriptions if needed.
func (m Metric) id() string {
	var key strings.Builder
	key.WriteString(m.Context)
	for _, desc := range m.MetricsDesc {
		key.WriteString(desc)
	}
	return key.String()
}
3 changes: 2 additions & 1 deletion main.go
Original file line number Diff line number Diff line change
Expand Up @@ -120,6 +120,7 @@ func main() {
CustomMetrics: *customMetrics,
QueryTimeout: *queryTimeout,
DefaultMetricsFile: *defaultFileMetrics,
ScrapeInterval: *scrapeInterval,
}
exporter, err := collector.NewExporter(logger, config)
if err != nil {
Expand All @@ -129,7 +130,7 @@ func main() {
if *scrapeInterval != 0 {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
go exporter.RunScheduledScrapes(ctx, *scrapeInterval)
go exporter.RunScheduledScrapes(ctx)
}

prometheus.MustRegister(exporter)
Expand Down