Merge pull request #647 from KeyboardNerd/spkg/cvrf

vulnsrc: Refactor debian and alpine sources
Sida Chen committed 05cbf328aa to master via GitHub

@ -53,6 +53,20 @@ func ConvertFeatureSetToFeatures(features mapset.Set) []Feature {
    return uniqueFeatures
}

// FindKeyValueAndRollback wraps session FindKeyValue function with begin and
// rollback.
func FindKeyValueAndRollback(datastore Datastore, key string) (value string, ok bool, err error) {
    var tx Session
    tx, err = datastore.Begin()
    if err != nil {
        return
    }
    defer tx.Rollback()

    value, ok, err = tx.FindKeyValue(key)
    return
}

// PersistPartialLayerAndCommit wraps session PersistLayer function with begin and
// commit.
func PersistPartialLayerAndCommit(datastore Datastore, layer *Layer) error {
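As a usage note (not part of the diff): a minimal sketch of how an updater is expected to consume the new helper. Only `database.FindKeyValueAndRollback` comes from this change; the `readUpdaterFlag` wrapper and flag argument are illustrative.

```go
package example

import "github.com/coreos/clair/database"

// readUpdaterFlag reads a previously stored updater flag in a short,
// read-only transaction (Begin + FindKeyValue + deferred Rollback),
// which is exactly what the new helper wraps.
func readUpdaterFlag(datastore database.Datastore, flag string) (string, error) {
	value, ok, err := database.FindKeyValueAndRollback(datastore, flag)
	if err != nil {
		return "", err
	}
	if !ok {
		// Nothing stored yet; treat this as a first run.
		return "", nil
	}
	return value, nil
}
```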

@ -18,10 +18,8 @@ package alpine

import (
    "io"
    "os"
    "path/filepath"

    log "github.com/sirupsen/logrus"
    "gopkg.in/yaml.v2"

@ -30,10 +28,13 @@ import (
    "github.com/coreos/clair/ext/versionfmt"
    "github.com/coreos/clair/ext/versionfmt/dpkg"
    "github.com/coreos/clair/ext/vulnsrc"
    "github.com/coreos/clair/pkg/fsutil"
    "github.com/coreos/clair/pkg/gitutil"
)
const (
    // This Alpine vulnerability database affects origin packages, which have
    // an `origin` field of their own.
    secdbGitURL  = "https://github.com/alpinelinux/alpine-secdb"
    updaterFlag  = "alpine-secdbUpdater"
    nvdURLPrefix = "https://cve.mitre.org/cgi-bin/cvename.cgi?name="
@ -51,61 +52,44 @@ type updater struct {
}

func (u *updater) Update(db database.Datastore) (resp vulnsrc.UpdateResponse, err error) {
    log.WithField("package", "Alpine").Info("start fetching vulnerabilities")

    // Pull the master branch.
    var (
        commit         string
        existingCommit string
        foundCommit    bool
        namespaces     []string
        vulns          []database.VulnerabilityWithAffected
    )

    if u.repositoryLocalPath, commit, err = gitutil.CloneOrPull(secdbGitURL, u.repositoryLocalPath, updaterFlag); err != nil {
        return
    }

    // Set the updaterFlag to equal the commit processed.
    resp.FlagName = updaterFlag
    resp.FlagValue = commit

    if existingCommit, foundCommit, err = database.FindKeyValueAndRollback(db, updaterFlag); err != nil {
        return
    }

    // Short-circuit if there have been no updates.
    if foundCommit && commit == existingCommit {
        log.WithField("package", "alpine").Debug("no update, skip")
        return
    }

    // Get the list of namespaces from the repository.
    if namespaces, err = fsutil.Readdir(u.repositoryLocalPath, fsutil.DirectoriesOnly); err != nil {
        return
    }

    // Append any changed vulnerabilities to the response.
    for _, namespace := range namespaces {
        if vulns, err = parseVulnsFromNamespace(u.repositoryLocalPath, namespace); err != nil {
            return
        }

        resp.Vulnerabilities = append(resp.Vulnerabilities, vulns...)
    }
@ -118,74 +102,26 @@ func (u *updater) Clean() {
    }
}

func parseVulnsFromNamespace(repositoryPath, namespace string) (vulns []database.VulnerabilityWithAffected, err error) {
    nsDir := filepath.Join(repositoryPath, namespace)
    var dbFilenames []string
    if dbFilenames, err = fsutil.Readdir(nsDir, fsutil.FilesOnly); err != nil {
        return
    }

    for _, filename := range dbFilenames {
        var db *secDB
        if db, err = newSecDB(filepath.Join(nsDir, filename)); err != nil {
            return
        }

        vulns = append(vulns, db.Vulnerabilities()...)
    }

    return
}

type secDB struct {
    Distro   string `yaml:"distroversion"`
    Packages []struct {
        Pkg struct {

@ -195,42 +131,54 @@ type secDBFile struct {
    } `yaml:"packages"`
}

func newSecDB(filePath string) (file *secDB, err error) {
    var f io.ReadCloser
    f, err = os.Open(filePath)
    if err != nil {
        return
    }

    defer f.Close()
    file = &secDB{}
    err = yaml.NewDecoder(f).Decode(file)
    return
}

func (file *secDB) Vulnerabilities() (vulns []database.VulnerabilityWithAffected) {
    if file == nil {
        return
    }

    namespace := database.Namespace{Name: "alpine:" + file.Distro, VersionFormat: dpkg.ParserName}
    for _, pkg := range file.Packages {
        for version, cveNames := range pkg.Pkg.Fixes {
            if err := versionfmt.Valid(dpkg.ParserName, version); err != nil {
                log.WithError(err).WithFields(log.Fields{
                    "version":      version,
                    "package name": pkg.Pkg.Name,
                }).Warning("could not parse package version, skipping")
                continue
            }

            for _, cve := range cveNames {
                vuln := database.VulnerabilityWithAffected{
                    Vulnerability: database.Vulnerability{
                        Name:      cve,
                        Link:      nvdURLPrefix + cve,
                        Severity:  database.UnknownSeverity,
                        Namespace: namespace,
                    },
                }

                var fixedInVersion string
                if version != versionfmt.MaxVersion {
                    fixedInVersion = version
                }

                vuln.Affected = []database.AffectedFeature{
                    {
                        AffectedType:    affectedType,
                        FeatureName:     pkg.Pkg.Name,
                        AffectedVersion: version,
                        FixedInVersion:  fixedInVersion,
                        Namespace: database.Namespace{

@ -15,28 +15,24 @@
package alpine

import (
    "path/filepath"
    "runtime"
    "testing"

    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/require"
)

func TestYAMLParsing(t *testing.T) {
    _, filename, _, _ := runtime.Caller(0)
    path := filepath.Join(filepath.Dir(filename))

    secdb, err := newSecDB(filepath.Join(path, "/testdata/v34_main.yaml"))
    require.Nil(t, err)
    vulns := secdb.Vulnerabilities()

    assert.Equal(t, 105, len(vulns))
    assert.Equal(t, "CVE-2016-5387", vulns[0].Name)
    assert.Equal(t, "alpine:v3.4", vulns[0].Namespace.Name)
    assert.Equal(t, "apache2", vulns[0].Affected[0].FeatureName)
    assert.Equal(t, "https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2016-5387", vulns[0].Link)
}
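For orientation, a small sketch of the same two-step flow this test exercises: decode a secdb YAML file with `newSecDB`, then flatten it with `Vulnerabilities`. The `printSecDB` helper is illustrative, not part of the PR, and would have to live inside the `alpine` package since both functions are unexported.

```go
package alpine

import "fmt"

// printSecDB decodes one secdb YAML file and prints the flattened
// vulnerabilities it contains (illustrative helper only).
func printSecDB(path string) error {
	db, err := newSecDB(path)
	if err != nil {
		return err
	}
	for _, v := range db.Vulnerabilities() {
		fmt.Println(v.Name, v.Namespace.Name)
	}
	return nil
}
```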

@ -62,22 +62,9 @@ func init() {

func (u *updater) Update(datastore database.Datastore) (resp vulnsrc.UpdateResponse, err error) {
    log.WithField("package", "Debian").Info("Start fetching vulnerabilities")

    latestHash, ok, err := database.FindKeyValueAndRollback(datastore, updaterFlag)
    if err != nil {
        return
    }

    if !ok {

@ -136,7 +123,7 @@ func buildResponse(jsonReader io.Reader, latestKnownHash string) (resp vulnsrc.U
    // Calculate the hash and skip updating if the hash has been seen before.
    hash = hex.EncodeToString(jsonSHA.Sum(nil))
    if latestKnownHash == hash {
        log.WithField("package", "Debian").Debug("no update, skip")
        return resp, nil
    }

@ -30,7 +30,7 @@ func TestDebianParser(t *testing.T) {
    _, filename, _, _ := runtime.Caller(0)

    // Test parsing testdata/fetcher_debian_test.json
    testFile, _ := os.Open(filepath.Join(filepath.Dir(filename), "/testdata/fetcher_debian_test.json"))
    response, err := buildResponse(testFile, "")
    if assert.Nil(t, err) && assert.Len(t, response.Vulnerabilities, 2) {
        for _, vulnerability := range response.Vulnerabilities {

@ -119,15 +119,9 @@ func compareELSA(left, right int) int {

func (u *updater) Update(datastore database.Datastore) (resp vulnsrc.UpdateResponse, err error) {
    log.WithField("package", "Oracle Linux").Info("Start fetching vulnerabilities")

    // Get the first ELSA we have to manage.
    flagValue, ok, err := database.FindKeyValueAndRollback(datastore, updaterFlag)
    if err != nil {
        return
    }

    if !ok {

@ -30,7 +30,7 @@ func TestOracleParser(t *testing.T) {
    path := filepath.Join(filepath.Dir(filename))

    // Test parsing testdata/fetcher_oracle_test.1.xml
    testFile, _ := os.Open(filepath.Join(path, "/testdata/fetcher_oracle_test.1.xml"))
    defer testFile.Close()

    vulnerabilities, err := parseELSA(testFile)

@ -78,7 +78,7 @@ func TestOracleParser(t *testing.T) {
        }
    }

    testFile2, _ := os.Open(filepath.Join(path, "/testdata/fetcher_oracle_test.2.xml"))
    defer testFile2.Close()

    vulnerabilities, err = parseELSA(testFile2)

@ -101,21 +101,12 @@ func init() {

func (u *updater) Update(datastore database.Datastore) (resp vulnsrc.UpdateResponse, err error) {
    log.WithField("package", "RHEL").Info("Start fetching vulnerabilities")

    // Get the first RHSA we have to manage.
    flagValue, ok, err := database.FindKeyValueAndRollback(datastore, updaterFlag)
    if err != nil {
        return resp, err
    }

    if !ok {
        flagValue = ""
    }

@ -31,7 +31,7 @@ func TestRHELParserMultipleCVE(t *testing.T) {
    path := filepath.Join(filepath.Dir(filename))

    // Test parsing testdata/fetcher_rhel_test.2.xml
    testFile, _ := os.Open(filepath.Join(path, "/testdata/fetcher_rhel_test.2.xml"))
    vulnerabilities, err := parseRHSA(testFile)

    // Expected

@ -86,7 +86,7 @@ func TestRHELParserOneCVE(t *testing.T) {
    path := filepath.Join(filepath.Dir(filename))

    // Test parsing testdata/fetcher_rhel_test.1.xml
    testFile, _ := os.Open(filepath.Join(path, "/testdata/fetcher_rhel_test.1.xml"))
    vulnerabilities, err := parseRHSA(testFile)
    if assert.Nil(t, err) && assert.Len(t, vulnerabilities, 1) {
        assert.Equal(t, "CVE-2015-0252", vulnerabilities[0].Name)

@ -22,6 +22,7 @@ import (
    "fmt"
    "io"
    "os"
    "path/filepath"
    "regexp"
    "strings"

@ -101,12 +102,11 @@ func (u *updater) Update(db database.Datastore) (resp vulnsrc.UpdateResponse, er
    defer tx.Rollback()

    // Ask the database for the latest commit we successfully applied.
    dbCommit, ok, err := database.FindKeyValueAndRollback(db, updaterFlag)
    if err != nil {
        return
    }

    if !ok {
        dbCommit = ""
    }
@ -161,7 +161,7 @@ func collectModifiedVulnerabilities(commit, dbCommit, repositoryLocalPath string

func processDirectory(repositoryLocalPath, dirName string, modifiedCVE map[string]struct{}) error {
    // Open the directory.
    d, err := os.Open(filepath.Join(repositoryLocalPath, dirName))
    if err != nil {
        log.WithError(err).Error("could not open Ubuntu vulnerabilities repository's folder")
        return vulnsrc.ErrFilesystem

@ -191,7 +191,7 @@ func collectVulnerabilitiesAndNotes(repositoryLocalPath string, modifiedCVE map[
    for cvePath := range modifiedCVE {
        // Open the CVE file.
        file, err := os.Open(filepath.Join(repositoryLocalPath, cvePath))
        if err != nil {
            // This can happen when a file is modified then moved in another commit.
            continue
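The string concatenations replaced above behave the same on Linux, but `filepath.Join` also normalizes separators and redundant path elements. A tiny self-contained sketch (the paths are made up):

```go
package main

import (
	"fmt"
	"path/filepath"
)

func main() {
	// Join cleans the result, so stray separators in either argument are harmless.
	fmt.Println(filepath.Join("/tmp/ubuntu-cve-tracker/", "/active/CVE-2016-5387"))
	// Output: /tmp/ubuntu-cve-tracker/active/CVE-2016-5387
}
```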

@ -32,7 +32,7 @@ func TestUbuntuParser(t *testing.T) {
    path := filepath.Join(filepath.Dir(filename))

    // Test parsing testdata/fetcher_
    testData, _ := os.Open(filepath.Join(path, "/testdata/fetcher_ubuntu_test.txt"))
    defer testData.Close()

    vulnerability, unknownReleases, err := parseUbuntuCVE(testData)
    if assert.Nil(t, err) {

glide.lock (generated)

@ -1,5 +1,5 @@
hash: 208de0ba40f951c17ac45683952efdd6b14f5efbfb70dcdb493c52954d96cb75
updated: 2018-10-22T22:58:38.105092-04:00
imports:
- name: github.com/beorn7/perks
  version: 4c0e84591b9aa9e6dcfdf3e020114cd81f89d5f9

@ -49,7 +49,7 @@ imports:
  subpackages:
  - simplelru
- name: github.com/julienschmidt/httprouter
  version: 348b672cd90d8190f8240323e372ecd1e66b59dc
- name: github.com/lib/pq
  version: 8837942c3e09574accbc5f150e2c5e057189cace
  subpackages:

@ -65,9 +65,10 @@ imports:
  subpackages:
  - difflib
- name: github.com/prometheus/client_golang
  version: 1cafe34db7fdec6022e17e00e1c1ea501022f3e4
  subpackages:
  - prometheus
  - prometheus/internal
- name: github.com/prometheus/client_model
  version: 6f3806018612930941127f2a7c6c453ba2c527d2
  subpackages:

@ -90,6 +91,7 @@ imports:
  version: f35b8ab0b5a2cef36673838d662e249dd9c94686
  subpackages:
  - assert
  - require
- name: golang.org/x/net
  version: 59a0b19b5533c7977ddeb86b017bf507ed407b12
  subpackages:

@ -133,5 +135,5 @@ imports:
  - tap
  - transport
- name: gopkg.in/yaml.v2
  version: 5420a8b6744d3b0345ab293f6fcba19c978f1183
testImports: []

@ -26,7 +26,8 @@ import:
  version: ^1.1.4
  subpackages:
  - assert
- package: github.com/cockroachdb/cmux
- package: github.com/deckarep/golang-set
  version: ^1.7.1
- package: gopkg.in/yaml.v2
  version: ^2.2.1

@ -0,0 +1,66 @@
// Copyright 2018 clair authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package fsutil contains utility functions for file system querying.
package fsutil
import (
    "os"
    "strings"
)

type dirFilter int

const (
    // All doesn't filter anything
    All dirFilter = iota
    // FilesOnly filters Readdir to return only files
    FilesOnly
    // DirectoriesOnly filters Readdir to return only directories
    DirectoriesOnly
)

// Readdir lists the files or folders under the given path and filters them
// based on the given dirFilter.
func Readdir(path string, filter dirFilter) ([]string, error) {
    dir, err := os.Open(path)
    if err != nil {
        return nil, err
    }
    defer dir.Close()

    finfos, err := dir.Readdir(0)
    if err != nil {
        return nil, err
    }

    var files []string
    for _, info := range finfos {
        if filter == DirectoriesOnly && !info.IsDir() {
            continue
        }

        if filter == FilesOnly && info.IsDir() {
            continue
        }

        if strings.HasPrefix(info.Name(), ".") {
            continue
        }

        files = append(files, info.Name())
    }

    return files, nil
}
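A short usage sketch of the new package, mirroring how the Alpine updater calls it; the `listNamespaces` helper and the directory argument are illustrative.

```go
package example

import (
	"fmt"

	"github.com/coreos/clair/pkg/fsutil"
)

// listNamespaces prints the top-level directories of a local secdb checkout,
// skipping regular files and dot-entries exactly as fsutil.Readdir does.
func listNamespaces(repoPath string) error {
	namespaces, err := fsutil.Readdir(repoPath, fsutil.DirectoriesOnly)
	if err != nil {
		return err
	}
	for _, ns := range namespaces {
		fmt.Println(ns)
	}
	return nil
}
```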

@ -1,8 +1,18 @@
sudo: false
language: go
go:
- 1.7
- 1.8
- 1.9
- "1.10"
- tip
before_install:
- go get golang.org/x/tools/cmd/cover
- go get github.com/mattn/goveralls
- go get github.com/golang/lint/golint
script:
- go test -v -covermode=count -coverprofile=coverage.out
- go vet ./...
- test -z "$(gofmt -d -s . | tee /dev/stderr)"
- test -z "$(golint ./... | tee /dev/stderr)"
- $HOME/gopath/bin/goveralls -coverprofile=coverage.out -service=travis-ci

@ -1,58 +1,37 @@
# HttpRouter [![Build Status](https://travis-ci.org/julienschmidt/httprouter.svg?branch=master)](https://travis-ci.org/julienschmidt/httprouter) [![Coverage Status](https://coveralls.io/repos/github/julienschmidt/httprouter/badge.svg?branch=master)](https://coveralls.io/github/julienschmidt/httprouter?branch=master) [![GoDoc](https://godoc.org/github.com/julienschmidt/httprouter?status.svg)](http://godoc.org/github.com/julienschmidt/httprouter)

HttpRouter is a lightweight high performance HTTP request router (also called *multiplexer* or just *mux* for short) for [Go](https://golang.org/).

In contrast to the [default mux](https://golang.org/pkg/net/http/#ServeMux) of Go's `net/http` package, this router supports variables in the routing pattern and matches against the request method. It also scales better.

The router is optimized for high performance and a small memory footprint. It scales well even with very long paths and a large number of routes. A compressing dynamic trie (radix tree) structure is used for efficient matching.

## Features

**Only explicit matches:** With other routers, like [`http.ServeMux`](https://golang.org/pkg/net/http/#ServeMux), a requested URL path could match multiple patterns. Therefore they have some awkward pattern priority rules, like *longest match* or *first registered, first matched*. By design of this router, a request can only match exactly one or no route. As a result, there are also no unintended matches, which makes it great for SEO and improves the user experience.

**Stop caring about trailing slashes:** Choose the URL style you like, the router automatically redirects the client if a trailing slash is missing or if there is one extra. Of course it only does so, if the new path has a handler. If you don't like it, you can [turn off this behavior](https://godoc.org/github.com/julienschmidt/httprouter#Router.RedirectTrailingSlash).

**Path auto-correction:** Besides detecting the missing or additional trailing slash at no extra cost, the router can also fix wrong cases and remove superfluous path elements (like `../` or `//`). Is [CAPTAIN CAPS LOCK](http://www.urbandictionary.com/define.php?term=Captain+Caps+Lock) one of your users? HttpRouter can help him by making a case-insensitive look-up and redirecting him to the correct URL.

**Parameters in your routing pattern:** Stop parsing the requested URL path, just give the path segment a name and the router delivers the dynamic value to you. Because of the design of the router, path parameters are very cheap.

**Zero Garbage:** The matching and dispatching process generates zero bytes of garbage. The only heap allocations that are made are building the slice of the key-value pairs for path parameters, and building new context and request objects (the latter only in the standard `Handler`/`HandlerFunc` api). In the 3-argument API, if the request path contains no parameters not a single heap allocation is necessary.

**Best Performance:** [Benchmarks speak for themselves](https://github.com/julienschmidt/go-http-routing-benchmark). See below for technical details of the implementation.

**No more server crashes:** You can set a [Panic handler](https://godoc.org/github.com/julienschmidt/httprouter#Router.PanicHandler) to deal with panics occurring during handling a HTTP request. The router then recovers and lets the `PanicHandler` log what happened and deliver a nice error page.

**Perfect for APIs:** The router design encourages to build sensible, hierarchical RESTful APIs. Moreover it has builtin native support for [OPTIONS requests](http://zacstewart.com/2012/04/14/http-options-method.html) and `405 Method Not Allowed` replies.

Of course you can also set **custom [`NotFound`](https://godoc.org/github.com/julienschmidt/httprouter#Router.NotFound) and [`MethodNotAllowed`](https://godoc.org/github.com/julienschmidt/httprouter#Router.MethodNotAllowed) handlers** and [**serve static files**](https://godoc.org/github.com/julienschmidt/httprouter#Router.ServeFiles).

## Usage

This is just a quick introduction, view the [GoDoc](http://godoc.org/github.com/julienschmidt/httprouter) for details.

Let's start with a trivial example:

```go
package main
@ -81,12 +60,11 @@ func main() {
```

### Named parameters

As you can see, `:name` is a *named parameter*. The values are accessible via `httprouter.Params`, which is just a slice of `httprouter.Param`s. You can get the value of a parameter either by its index in the slice, or by using the `ByName(name)` method: `:name` can be retrieved by `ByName("name")`.

Named parameters only match a single path segment:

```
Pattern: /user/:user
@ -99,9 +77,9 @@ Pattern: /user/:user
**Note:** Since this router has only explicit matches, you can not register static routes and parameters for the same path segment. For example you can not register the patterns `/user/new` and `/user/:user` for the same request method at the same time. The routing of different request methods is independent from each other.

### Catch-All parameters

The second type are *catch-all* parameters and have the form `*name`. Like the name suggests, they match everything. Therefore they must always be at the **end** of the pattern:

```
Pattern: /src/*filepath
@ -111,11 +89,8 @@ Pattern: /src/*filepath
```

## How does it work?

The router relies on a tree structure which makes heavy use of *common prefixes*, it is basically a *compact* [*prefix tree*](https://en.wikipedia.org/wiki/Trie) (or just [*Radix tree*](https://en.wikipedia.org/wiki/Radix_tree)). Nodes with a common prefix also share a common parent. Here is a short example what the routing tree for the `GET` request method could look like:

```
Priority   Path             Handle
@ -130,33 +105,15 @@ Priority Path Handle
1        |    └team\ *<7>
1        └contact\ *<8>
```

Every `*<num>` represents the memory address of a handler function (a pointer). If you follow a path through the tree from the root to the leaf, you get the complete route path, e.g `\blog\:post\`, where `:post` is just a placeholder ([*parameter*](#named-parameters)) for an actual post name. Unlike hash-maps, a tree structure also allows us to use dynamic parts like the `:post` parameter, since we actually match against the routing patterns instead of just comparing hashes. [As benchmarks show](https://github.com/julienschmidt/go-http-routing-benchmark), this works very well and efficient.

Since URL paths have a hierarchical structure and make use only of a limited set of characters (byte values), it is very likely that there are a lot of common prefixes. This allows us to easily reduce the routing into ever smaller problems. Moreover the router manages a separate tree for every request method. For one thing it is more space efficient than holding a method->handle map in every single node, it also allows us to greatly reduce the routing problem before even starting the look-up in the prefix-tree.

For even better scalability, the child nodes on each tree level are ordered by priority, where the priority is just the number of handles registered in sub nodes (children, grandchildren, and so on..). This helps in two ways:

1. Nodes which are part of the most routing paths are evaluated first. This helps to make as much routes as possible to be reachable as fast as possible.
2. It is some sort of cost compensation. The longest reachable path (highest cost) can always be evaluated first. The following scheme visualizes the tree structure. Nodes are evaluated from top to bottom and from left to right.

```
├------------
@ -168,47 +125,38 @@ structure. Nodes are evaluated from top to bottom and from left to right.
└-
```

## Why doesn't this work with `http.Handler`?

**It does!** The router itself implements the `http.Handler` interface. Moreover the router provides convenient [adapters for `http.Handler`](https://godoc.org/github.com/julienschmidt/httprouter#Router.Handler)s and [`http.HandlerFunc`](https://godoc.org/github.com/julienschmidt/httprouter#Router.HandlerFunc)s which allows them to be used as a [`httprouter.Handle`](https://godoc.org/github.com/julienschmidt/httprouter#Router.Handle) when registering a route. The only disadvantage is, that no parameter values can be retrieved when a `http.Handler` or `http.HandlerFunc` is used, since there is no efficient way to pass the values with the existing function parameters. Therefore [`httprouter.Handle`](https://godoc.org/github.com/julienschmidt/httprouter#Router.Handle) has a third function parameter.

Just try it out for yourself, the usage of HttpRouter is very straightforward. The package is compact and minimalistic, but also probably one of the easiest routers to set up.

## Where can I find Middleware *X*?

This package just provides a very efficient request router with a few extra features. The router is just a [`http.Handler`](https://golang.org/pkg/net/http/#Handler), you can chain any http.Handler compatible middleware before the router, for example the [Gorilla handlers](http://www.gorillatoolkit.org/pkg/handlers). Or you could [just write your own](https://justinas.org/writing-http-middleware-in-go/), it's very easy!

Alternatively, you could try [a web framework based on HttpRouter](#web-frameworks-based-on-httprouter).

### Multi-domain / Sub-domains

Here is a quick example: Does your server serve multiple domains / hosts?
You want to use sub-domains?
Define a router per host!

```go
// We need an object that implements the http.Handler interface.
// Therefore we need a type for which we implement the ServeHTTP method.
// We just use a map here, in which we map host names (with port) to http.Handlers
type HostSwitch map[string]http.Handler

// Implement the ServeHTTP method on our new type
func (hs HostSwitch) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	// Check if a http.Handler is registered for the given host.
	// If yes, use it to handle the request.
	if handler := hs[r.Host]; handler != nil {
		handler.ServeHTTP(w, r)
	} else {
		// Handle host names for which no handler is registered
		http.Error(w, "Forbidden", 403) // Or Redirect?
	}
}
@ -230,94 +178,89 @@ func main() {
```

### Basic Authentication

Another quick example: Basic Authentication (RFC 2617) for handles:

```go
package main

import (
	"fmt"
	"log"
	"net/http"

	"github.com/julienschmidt/httprouter"
)

func BasicAuth(h httprouter.Handle, requiredUser, requiredPassword string) httprouter.Handle {
	return func(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {
		// Get the Basic Authentication credentials
		user, password, hasAuth := r.BasicAuth()

		if hasAuth && user == requiredUser && password == requiredPassword {
			// Delegate request to the given handle
			h(w, r, ps)
		} else {
			// Request Basic Authentication otherwise
			w.Header().Set("WWW-Authenticate", "Basic realm=Restricted")
			http.Error(w, http.StatusText(http.StatusUnauthorized), http.StatusUnauthorized)
		}
	}
}

func Index(w http.ResponseWriter, r *http.Request, _ httprouter.Params) {
	fmt.Fprint(w, "Not protected!\n")
}

func Protected(w http.ResponseWriter, r *http.Request, _ httprouter.Params) {
	fmt.Fprint(w, "Protected!\n")
}

func main() {
	user := "gordon"
	pass := "secret!"

	router := httprouter.New()
	router.GET("/", Index)
	router.GET("/protected/", BasicAuth(Protected, user, pass))

	log.Fatal(http.ListenAndServe(":8080", router))
}
```
## Chaining with the NotFound handler

**NOTE: It might be required to set [`Router.HandleMethodNotAllowed`](https://godoc.org/github.com/julienschmidt/httprouter#Router.HandleMethodNotAllowed) to `false` to avoid problems.**

You can use another [`http.Handler`](https://golang.org/pkg/net/http/#Handler), for example another router, to handle requests which could not be matched by this router by using the [`Router.NotFound`](https://godoc.org/github.com/julienschmidt/httprouter#Router.NotFound) handler. This allows chaining.

### Static files

The `NotFound` handler can for example be used to serve static files from the root path `/` (like an `index.html` file along with other assets):

```go
// Serve static files from the ./public directory
router.NotFound = http.FileServer(http.Dir("public"))
```

But this approach sidesteps the strict core rules of this router to avoid routing problems. A cleaner approach is to use a distinct sub-path for serving files, like `/static/*filepath` or `/files/*filepath`.

## Web Frameworks based on HttpRouter

If the HttpRouter is a bit too minimalistic for you, you might try one of the following more high-level 3rd-party web frameworks building upon the HttpRouter package:

* [Ace](https://github.com/plimble/ace): Blazing fast Go Web Framework
* [api2go](https://github.com/manyminds/api2go): A JSON API Implementation for Go
* [Gin](https://github.com/gin-gonic/gin): Features a martini-like API with much better performance
* [Goat](https://github.com/bahlo/goat): A minimalistic REST API server in Go
* [goMiddlewareChain](https://github.com/TobiEiss/goMiddlewareChain): An express.js-like-middleware-chain
* [Hikaru](https://github.com/najeira/hikaru): Supports standalone and Google AppEngine
* [Hitch](https://github.com/nbio/hitch): Hitch ties httprouter, [httpcontext](https://github.com/nbio/httpcontext), and middleware up in a bow
* [httpway](https://github.com/corneldamian/httpway): Simple middleware extension with context for httprouter and a server with gracefully shutdown support
* [kami](https://github.com/guregu/kami): A tiny web framework using x/net/context
* [Medeina](https://github.com/imdario/medeina): Inspired by Ruby's Roda and Cuba
* [Neko](https://github.com/rocwong/neko): A lightweight web application framework for Golang
* [River](https://github.com/abiosoft/river): River is a simple and lightweight REST server
* [Roxanna](https://github.com/iamthemuffinman/Roxanna): An amalgamation of httprouter, better logging, and hot reload
* [siesta](https://github.com/VividCortex/siesta): Composable HTTP handlers with contexts
* [xmux](https://github.com/rs/xmux): xmux is a httprouter fork on top of xhandler (net/context aware)

@ -0,0 +1,38 @@
// +build go1.7
package httprouter
import (
    "context"
    "net/http"
)

type paramsKey struct{}

// ParamsKey is the request context key under which URL params are stored.
//
// This is only present from go 1.7.
var ParamsKey = paramsKey{}

// Handler is an adapter which allows the usage of an http.Handler as a
// request handle. With go 1.7+, the Params will be available in the
// request context under ParamsKey.
func (r *Router) Handler(method, path string, handler http.Handler) {
    r.Handle(method, path,
        func(w http.ResponseWriter, req *http.Request, p Params) {
            ctx := req.Context()
            ctx = context.WithValue(ctx, ParamsKey, p)
            req = req.WithContext(ctx)
            handler.ServeHTTP(w, req)
        },
    )
}

// ParamsFromContext pulls the URL parameters from a request context,
// or returns nil if none are present.
//
// This is only present from go 1.7.
func ParamsFromContext(ctx context.Context) Params {
    p, _ := ctx.Value(ParamsKey).(Params)
    return p
}
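A usage sketch of this adapter on Go 1.7+: a plain `http.Handler` is registered via `Router.Handler` and reads the route parameters back through `ParamsFromContext`. The route and handler body are illustrative.

```go
package main

import (
	"fmt"
	"log"
	"net/http"

	"github.com/julienschmidt/httprouter"
)

func main() {
	router := httprouter.New()

	// Register a plain http.Handler; the adapter stores the Params in the
	// request context under ParamsKey.
	router.Handler("GET", "/hello/:name", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		ps := httprouter.ParamsFromContext(r.Context())
		fmt.Fprintf(w, "hello, %s!\n", ps.ByName("name"))
	}))

	log.Fatal(http.ListenAndServe(":8080", router))
}
```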

@ -0,0 +1,16 @@
// +build !go1.7
package httprouter
import "net/http"
// Handler is an adapter which allows the usage of an http.Handler as a
// request handle. With go 1.7+, the Params will be available in the
// request context under ParamsKey.
func (r *Router) Handler(method, path string, handler http.Handler) {
    r.Handle(method, path,
        func(w http.ResponseWriter, req *http.Request, _ Params) {
            handler.ServeHTTP(w, req)
        },
    )
}

@ -41,7 +41,7 @@ func CleanPath(p string) string {
        buf[0] = '/'
    }

    trailing := n > 1 && p[n-1] == '/'

    // A bit more clunky without a 'lazybuf' like the path package, but the loop
    // gets completely inlined (bufApp). So in contrast to the path package this

@ -59,11 +59,11 @@ func CleanPath(p string) string {
        case p[r] == '.' && p[r+1] == '/':
            // . element
            r += 2

        case p[r] == '.' && p[r+1] == '.' && (r+2 == n || p[r+2] == '/'):
            // .. element: remove to last /
            r += 3

            if w > 1 {
                // can backtrack

@ -22,6 +22,7 @@ var cleanTests = []struct {
    // missing root
    {"", "/"},
    {"a/", "/a/"},
    {"abc", "/abc"},
    {"abc/def", "/abc/def"},
    {"a/b/c", "/a/b/c"},

@ -55,7 +55,7 @@
//
// Catch-all parameters match anything until the path end, including the
// directory index (the '/' before the catch-all). Since they match anything
// until the end, catch-all parameters must always be the final path element.
//  Path: /files/*filepath
//
// Requests:
@ -138,14 +138,20 @@ type Router struct {
    // handler.
    HandleMethodNotAllowed bool

    // If enabled, the router automatically replies to OPTIONS requests.
    // Custom OPTIONS handlers take priority over automatic replies.
    HandleOPTIONS bool

    // Configurable http.Handler which is called when no matching route is
    // found. If it is not set, http.NotFound is used.
    NotFound http.Handler

    // Configurable http.Handler which is called when a request
    // cannot be routed and HandleMethodNotAllowed is true.
    // If it is not set, http.Error with http.StatusMethodNotAllowed is used.
    // The "Allow" header with allowed request methods is set before the handler
    // is called.
    MethodNotAllowed http.Handler

    // Function to handle panics recovered from http handlers.
    // It should be used to generate a error page and return the http error code

@ -165,6 +171,7 @@ func New() *Router {
        RedirectTrailingSlash:  true,
        RedirectFixedPath:      true,
        HandleMethodNotAllowed: true,
        HandleOPTIONS:          true,
    }
}
@ -229,16 +236,6 @@ func (r *Router) Handle(method, path string, handle Handle) {
    root.addRoute(path, handle)
}

// HandlerFunc is an adapter which allows the usage of an http.HandlerFunc as a
// request handle.
func (r *Router) HandlerFunc(method, path string, handler http.HandlerFunc) {
@ -286,15 +283,53 @@ func (r *Router) Lookup(method, path string) (Handle, Params, bool) {
    return nil, nil, false
}
func (r *Router) allowed(path, reqMethod string) (allow string) {
    if path == "*" { // server-wide
        for method := range r.trees {
            if method == "OPTIONS" {
                continue
            }

            // add request method to list of allowed methods
            if len(allow) == 0 {
                allow = method
            } else {
                allow += ", " + method
            }
        }
    } else { // specific path
        for method := range r.trees {
            // Skip the requested method - we already tried this one
            if method == reqMethod || method == "OPTIONS" {
                continue
            }

            handle, _, _ := r.trees[method].getValue(path)
            if handle != nil {
                // add request method to list of allowed methods
                if len(allow) == 0 {
                    allow = method
                } else {
                    allow += ", " + method
                }
            }
        }
    }
    if len(allow) > 0 {
        allow += ", OPTIONS"
    }
    return
}
// ServeHTTP makes the router implement the http.Handler interface.
func (r *Router) ServeHTTP(w http.ResponseWriter, req *http.Request) {
    if r.PanicHandler != nil {
        defer r.recv(w, req)
    }

    path := req.URL.Path

    if root := r.trees[req.Method]; root != nil {
        if handle, ps, tsr := root.getValue(path); handle != nil {
            handle(w, req, ps)
            return

@ -331,18 +366,19 @@ func (r *Router) ServeHTTP(w http.ResponseWriter, req *http.Request) {
        }
    }

    if req.Method == "OPTIONS" && r.HandleOPTIONS {
        // Handle OPTIONS requests
        if allow := r.allowed(path, req.Method); len(allow) > 0 {
            w.Header().Set("Allow", allow)
            return
        }
    } else {
        // Handle 405
        if r.HandleMethodNotAllowed {
            if allow := r.allowed(path, req.Method); len(allow) > 0 {
                w.Header().Set("Allow", allow)
                if r.MethodNotAllowed != nil {
                    r.MethodNotAllowed.ServeHTTP(w, req)
                } else {
                    http.Error(w,
                        http.StatusText(http.StatusMethodNotAllowed),

@ -356,7 +392,7 @@ func (r *Router) ServeHTTP(w http.ResponseWriter, req *http.Request) {
    // Handle 404
    if r.NotFound != nil {
        r.NotFound.ServeHTTP(w, req)
    } else {
        http.NotFound(w, req)
    }

@ -68,11 +68,11 @@ func TestRouter(t *testing.T) {
} }
type handlerStruct struct { type handlerStruct struct {
handeled *bool handled *bool
} }
func (h handlerStruct) ServeHTTP(w http.ResponseWriter, r *http.Request) { func (h handlerStruct) ServeHTTP(w http.ResponseWriter, r *http.Request) {
*h.handeled = true *h.handled = true
} }
func TestRouterAPI(t *testing.T) { func TestRouterAPI(t *testing.T) {
@ -174,26 +174,175 @@ func TestRouterRoot(t *testing.T) {
} }
} }
func TestRouterChaining(t *testing.T) {
router1 := New()
router2 := New()
router1.NotFound = router2
fooHit := false
router1.POST("/foo", func(w http.ResponseWriter, req *http.Request, _ Params) {
fooHit = true
w.WriteHeader(http.StatusOK)
})
barHit := false
router2.POST("/bar", func(w http.ResponseWriter, req *http.Request, _ Params) {
barHit = true
w.WriteHeader(http.StatusOK)
})
r, _ := http.NewRequest("POST", "/foo", nil)
w := httptest.NewRecorder()
router1.ServeHTTP(w, r)
if !(w.Code == http.StatusOK && fooHit) {
t.Errorf("Regular routing failed with router chaining.")
t.FailNow()
}
r, _ = http.NewRequest("POST", "/bar", nil)
w = httptest.NewRecorder()
router1.ServeHTTP(w, r)
if !(w.Code == http.StatusOK && barHit) {
t.Errorf("Chained routing failed with router chaining.")
t.FailNow()
}
r, _ = http.NewRequest("POST", "/qax", nil)
w = httptest.NewRecorder()
router1.ServeHTTP(w, r)
if !(w.Code == http.StatusNotFound) {
t.Errorf("NotFound behavior failed with router chaining.")
t.FailNow()
}
}
func TestRouterOPTIONS(t *testing.T) {
handlerFunc := func(_ http.ResponseWriter, _ *http.Request, _ Params) {}
router := New()
router.POST("/path", handlerFunc)
// test automatic replies
// * (server)
r, _ := http.NewRequest("OPTIONS", "*", nil)
w := httptest.NewRecorder()
router.ServeHTTP(w, r)
if !(w.Code == http.StatusOK) {
t.Errorf("OPTIONS handling failed: Code=%d, Header=%v", w.Code, w.Header())
} else if allow := w.Header().Get("Allow"); allow != "POST, OPTIONS" {
t.Error("unexpected Allow header value: " + allow)
}
// path
r, _ = http.NewRequest("OPTIONS", "/path", nil)
w = httptest.NewRecorder()
router.ServeHTTP(w, r)
if !(w.Code == http.StatusOK) {
t.Errorf("OPTIONS handling failed: Code=%d, Header=%v", w.Code, w.Header())
} else if allow := w.Header().Get("Allow"); allow != "POST, OPTIONS" {
t.Error("unexpected Allow header value: " + allow)
}
r, _ = http.NewRequest("OPTIONS", "/doesnotexist", nil)
w = httptest.NewRecorder()
router.ServeHTTP(w, r)
if !(w.Code == http.StatusNotFound) {
t.Errorf("OPTIONS handling failed: Code=%d, Header=%v", w.Code, w.Header())
}
// add another method
router.GET("/path", handlerFunc)
// test again
// * (server)
r, _ = http.NewRequest("OPTIONS", "*", nil)
w = httptest.NewRecorder()
router.ServeHTTP(w, r)
if !(w.Code == http.StatusOK) {
t.Errorf("OPTIONS handling failed: Code=%d, Header=%v", w.Code, w.Header())
} else if allow := w.Header().Get("Allow"); allow != "POST, GET, OPTIONS" && allow != "GET, POST, OPTIONS" {
t.Error("unexpected Allow header value: " + allow)
}
// path
r, _ = http.NewRequest("OPTIONS", "/path", nil)
w = httptest.NewRecorder()
router.ServeHTTP(w, r)
if !(w.Code == http.StatusOK) {
t.Errorf("OPTIONS handling failed: Code=%d, Header=%v", w.Code, w.Header())
} else if allow := w.Header().Get("Allow"); allow != "POST, GET, OPTIONS" && allow != "GET, POST, OPTIONS" {
t.Error("unexpected Allow header value: " + allow)
}
// custom handler
var custom bool
router.OPTIONS("/path", func(w http.ResponseWriter, r *http.Request, _ Params) {
custom = true
})
// test again
// * (server)
r, _ = http.NewRequest("OPTIONS", "*", nil)
w = httptest.NewRecorder()
router.ServeHTTP(w, r)
if !(w.Code == http.StatusOK) {
t.Errorf("OPTIONS handling failed: Code=%d, Header=%v", w.Code, w.Header())
} else if allow := w.Header().Get("Allow"); allow != "POST, GET, OPTIONS" && allow != "GET, POST, OPTIONS" {
t.Error("unexpected Allow header value: " + allow)
}
if custom {
t.Error("custom handler called on *")
}
// path
r, _ = http.NewRequest("OPTIONS", "/path", nil)
w = httptest.NewRecorder()
router.ServeHTTP(w, r)
if !(w.Code == http.StatusOK) {
t.Errorf("OPTIONS handling failed: Code=%d, Header=%v", w.Code, w.Header())
}
if !custom {
t.Error("custom handler not called")
}
}
func TestRouterNotAllowed(t *testing.T) { func TestRouterNotAllowed(t *testing.T) {
handlerFunc := func(_ http.ResponseWriter, _ *http.Request, _ Params) {} handlerFunc := func(_ http.ResponseWriter, _ *http.Request, _ Params) {}
router := New() router := New()
router.POST("/path", handlerFunc) router.POST("/path", handlerFunc)
// Test not allowed // test not allowed
r, _ := http.NewRequest("GET", "/path", nil) r, _ := http.NewRequest("GET", "/path", nil)
w := httptest.NewRecorder() w := httptest.NewRecorder()
router.ServeHTTP(w, r) router.ServeHTTP(w, r)
if !(w.Code == http.StatusMethodNotAllowed) { if !(w.Code == http.StatusMethodNotAllowed) {
t.Errorf("NotAllowed handling failed: Code=%d, Header=%v", w.Code, w.Header()) t.Errorf("NotAllowed handling failed: Code=%d, Header=%v", w.Code, w.Header())
} else if allow := w.Header().Get("Allow"); allow != "POST, OPTIONS" {
t.Error("unexpected Allow header value: " + allow)
}
// add another method
router.DELETE("/path", handlerFunc)
router.OPTIONS("/path", handlerFunc) // must be ignored
// test again
r, _ = http.NewRequest("GET", "/path", nil)
w = httptest.NewRecorder()
router.ServeHTTP(w, r)
if !(w.Code == http.StatusMethodNotAllowed) {
t.Errorf("NotAllowed handling failed: Code=%d, Header=%v", w.Code, w.Header())
} else if allow := w.Header().Get("Allow"); allow != "POST, DELETE, OPTIONS" && allow != "DELETE, POST, OPTIONS" {
t.Error("unexpected Allow header value: " + allow)
} }
// test custom handler
w = httptest.NewRecorder() w = httptest.NewRecorder()
responseText := "custom method" responseText := "custom method"
router.MethodNotAllowed = func(w http.ResponseWriter, req *http.Request) { router.MethodNotAllowed = http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
w.WriteHeader(http.StatusTeapot) w.WriteHeader(http.StatusTeapot)
w.Write([]byte(responseText)) w.Write([]byte(responseText))
} })
router.ServeHTTP(w, r) router.ServeHTTP(w, r)
if got := w.Body.String(); !(got == responseText) { if got := w.Body.String(); !(got == responseText) {
t.Errorf("unexpected response got %q want %q", got, responseText) t.Errorf("unexpected response got %q want %q", got, responseText)
@ -201,6 +350,9 @@ func TestRouterNotAllowed(t *testing.T) {
if w.Code != http.StatusTeapot { if w.Code != http.StatusTeapot {
t.Errorf("unexpected response code %d want %d", w.Code, http.StatusTeapot) t.Errorf("unexpected response code %d want %d", w.Code, http.StatusTeapot)
} }
if allow := w.Header().Get("Allow"); allow != "POST, DELETE, OPTIONS" && allow != "DELETE, POST, OPTIONS" {
t.Error("unexpected Allow header value: " + allow)
}
} }
func TestRouterNotFound(t *testing.T) { func TestRouterNotFound(t *testing.T) {
@ -212,35 +364,35 @@ func TestRouterNotFound(t *testing.T) {
router.GET("/", handlerFunc) router.GET("/", handlerFunc)
testRoutes := []struct { testRoutes := []struct {
route string route string
code int code int
header string location string
}{ }{
{"/path/", 301, "map[Location:[/path]]"}, // TSR -/ {"/path/", 301, "/path"}, // TSR -/
{"/dir", 301, "map[Location:[/dir/]]"}, // TSR +/ {"/dir", 301, "/dir/"}, // TSR +/
{"", 301, "map[Location:[/]]"}, // TSR +/ {"", 301, "/"}, // TSR +/
{"/PATH", 301, "map[Location:[/path]]"}, // Fixed Case {"/PATH", 301, "/path"}, // Fixed Case
{"/DIR/", 301, "map[Location:[/dir/]]"}, // Fixed Case {"/DIR/", 301, "/dir/"}, // Fixed Case
{"/PATH/", 301, "map[Location:[/path]]"}, // Fixed Case -/ {"/PATH/", 301, "/path"}, // Fixed Case -/
{"/DIR", 301, "map[Location:[/dir/]]"}, // Fixed Case +/ {"/DIR", 301, "/dir/"}, // Fixed Case +/
{"/../path", 301, "map[Location:[/path]]"}, // CleanPath {"/../path", 301, "/path"}, // CleanPath
{"/nope", 404, ""}, // NotFound {"/nope", 404, ""}, // NotFound
} }
for _, tr := range testRoutes { for _, tr := range testRoutes {
r, _ := http.NewRequest("GET", tr.route, nil) r, _ := http.NewRequest("GET", tr.route, nil)
w := httptest.NewRecorder() w := httptest.NewRecorder()
router.ServeHTTP(w, r) router.ServeHTTP(w, r)
if !(w.Code == tr.code && (w.Code == 404 || fmt.Sprint(w.Header()) == tr.header)) { if !(w.Code == tr.code && (w.Code == 404 || fmt.Sprint(w.Header().Get("Location")) == tr.location)) {
t.Errorf("NotFound handling route %s failed: Code=%d, Header=%v", tr.route, w.Code, w.Header()) t.Errorf("NotFound handling route %s failed: Code=%d, Header=%v", tr.route, w.Code, w.Header().Get("Location"))
} }
} }
// Test custom not found handler // Test custom not found handler
var notFound bool var notFound bool
router.NotFound = func(rw http.ResponseWriter, r *http.Request) { router.NotFound = http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) {
rw.WriteHeader(404) rw.WriteHeader(404)
notFound = true notFound = true
} })
r, _ := http.NewRequest("GET", "/nope", nil) r, _ := http.NewRequest("GET", "/nope", nil)
w := httptest.NewRecorder() w := httptest.NewRecorder()
router.ServeHTTP(w, r) router.ServeHTTP(w, r)

@ -7,6 +7,7 @@ package httprouter
import ( import (
"strings" "strings"
"unicode" "unicode"
"unicode/utf8"
) )
func min(a, b int) int { func min(a, b int) int {
@ -33,9 +34,10 @@ func countParams(path string) uint8 {
type nodeType uint8 type nodeType uint8
const ( const (
static nodeType = 0 static nodeType = iota // default
param nodeType = 1 root
catchAll nodeType = 2 param
catchAll
) )
type node struct { type node struct {
@ -58,9 +60,7 @@ func (n *node) incrementChildPrio(pos int) int {
newPos := pos newPos := pos
for newPos > 0 && n.children[newPos-1].priority < prio { for newPos > 0 && n.children[newPos-1].priority < prio {
// swap node positions // swap node positions
tmpN := n.children[newPos-1] n.children[newPos-1], n.children[newPos] = n.children[newPos], n.children[newPos-1]
n.children[newPos-1] = n.children[newPos]
n.children[newPos] = tmpN
newPos-- newPos--
} }
@ -105,6 +105,7 @@ func (n *node) addRoute(path string, handle Handle) {
child := node{ child := node{
path: n.path[i:], path: n.path[i:],
wildChild: n.wildChild, wildChild: n.wildChild,
nType: static,
indices: n.indices, indices: n.indices,
children: n.children, children: n.children,
handle: n.handle, handle: n.handle,
@ -141,16 +142,25 @@ func (n *node) addRoute(path string, handle Handle) {
numParams-- numParams--
// Check if the wildcard matches // Check if the wildcard matches
if len(path) >= len(n.path) && n.path == path[:len(n.path)] { if len(path) >= len(n.path) && n.path == path[:len(n.path)] &&
// check for longer wildcard, e.g. :name and :names // Check for longer wildcard, e.g. :name and :names
if len(n.path) >= len(path) || path[len(n.path)] == '/' { (len(n.path) >= len(path) || path[len(n.path)] == '/') {
continue walk continue walk
} else {
// Wildcard conflict
var pathSeg string
if n.nType == catchAll {
pathSeg = path
} else {
pathSeg = strings.SplitN(path, "/", 2)[0]
} }
prefix := fullPath[:strings.Index(fullPath, pathSeg)] + n.path
panic("'" + pathSeg +
"' in new path '" + fullPath +
"' conflicts with existing wildcard '" + n.path +
"' in existing prefix '" + prefix +
"'")
} }
panic("path segment '" + path +
"' conflicts with existing wildcard '" + n.path +
"' in path '" + fullPath + "'")
} }
c := path[0] c := path[0]
@ -187,7 +197,7 @@ func (n *node) addRoute(path string, handle Handle) {
} else if i == len(path) { // Make node a (in-path) leaf } else if i == len(path) { // Make node a (in-path) leaf
if n.handle != nil { if n.handle != nil {
panic("a handle is already registered for path ''" + fullPath + "'") panic("a handle is already registered for path '" + fullPath + "'")
} }
n.handle = handle n.handle = handle
} }
@ -195,6 +205,7 @@ func (n *node) addRoute(path string, handle Handle) {
} }
} else { // Empty tree } else { // Empty tree
n.insertChild(numParams, path, fullPath, handle) n.insertChild(numParams, path, fullPath, handle)
n.nType = root
} }
} }
@ -317,7 +328,7 @@ func (n *node) insertChild(numParams uint8, path, fullPath string, handle Handle
// made if a handle exists with an extra (without the) trailing slash for the // made if a handle exists with an extra (without the) trailing slash for the
// given path. // given path.
func (n *node) getValue(path string) (handle Handle, p Params, tsr bool) { func (n *node) getValue(path string) (handle Handle, p Params, tsr bool) {
walk: // Outer loop for walking the tree walk: // outer loop for walking the tree
for { for {
if len(path) > len(n.path) { if len(path) > len(n.path) {
if path[:len(n.path)] == n.path { if path[:len(n.path)] == n.path {
@ -411,6 +422,11 @@ walk: // Outer loop for walking the tree
return return
} }
if path == "/" && n.wildChild && n.nType != root {
tsr = true
return
}
// No handle found. Check if a handle for this path + a // No handle found. Check if a handle for this path + a
// trailing slash exists for trailing slash recommendation // trailing slash exists for trailing slash recommendation
for i := 0; i < len(n.indices); i++ { for i := 0; i < len(n.indices); i++ {
@ -439,34 +455,117 @@ walk: // Outer loop for walking the tree
// It returns the case-corrected path and a bool indicating whether the lookup // It returns the case-corrected path and a bool indicating whether the lookup
// was successful. // was successful.
func (n *node) findCaseInsensitivePath(path string, fixTrailingSlash bool) (ciPath []byte, found bool) { func (n *node) findCaseInsensitivePath(path string, fixTrailingSlash bool) (ciPath []byte, found bool) {
ciPath = make([]byte, 0, len(path)+1) // preallocate enough memory return n.findCaseInsensitivePathRec(
path,
strings.ToLower(path),
make([]byte, 0, len(path)+1), // preallocate enough memory for new path
[4]byte{}, // empty rune buffer
fixTrailingSlash,
)
}
// Outer loop for walking the tree // shift bytes in array by n bytes left
for len(path) >= len(n.path) && strings.ToLower(path[:len(n.path)]) == strings.ToLower(n.path) { func shiftNRuneBytes(rb [4]byte, n int) [4]byte {
path = path[len(n.path):] switch n {
case 0:
return rb
case 1:
return [4]byte{rb[1], rb[2], rb[3], 0}
case 2:
return [4]byte{rb[2], rb[3]}
case 3:
return [4]byte{rb[3]}
default:
return [4]byte{}
}
}
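To make the rune-byte buffer concrete, a small illustration of the helper above; the function name is made up, and it would have to live inside package httprouter because shiftNRuneBytes is unexported.

func exampleShiftRuneBytes() {
	var rb [4]byte
	utf8.EncodeRune(rb[:], 'ä') // 'ä' is 0xC3 0xA4 in UTF-8, so rb == [4]byte{0xC3, 0xA4, 0, 0}
	rb = shiftNRuneBytes(rb, 1) // rb == [4]byte{0xA4, 0, 0, 0}: the lead byte was already matched
	_ = rb
}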
// recursive case-insensitive lookup function used by n.findCaseInsensitivePath
func (n *node) findCaseInsensitivePathRec(path, loPath string, ciPath []byte, rb [4]byte, fixTrailingSlash bool) ([]byte, bool) {
loNPath := strings.ToLower(n.path)
walk: // outer loop for walking the tree
for len(loPath) >= len(loNPath) && (len(loNPath) == 0 || loPath[1:len(loNPath)] == loNPath[1:]) {
// add common path to result
ciPath = append(ciPath, n.path...) ciPath = append(ciPath, n.path...)
if len(path) > 0 { if path = path[len(n.path):]; len(path) > 0 {
loOld := loPath
loPath = loPath[len(loNPath):]
// If this node does not have a wildcard (param or catchAll) child, // If this node does not have a wildcard (param or catchAll) child,
// we can just look up the next child node and continue to walk down // we can just look up the next child node and continue to walk down
// the tree // the tree
if !n.wildChild { if !n.wildChild {
r := unicode.ToLower(rune(path[0])) // skip rune bytes already processed
for i, index := range n.indices { rb = shiftNRuneBytes(rb, len(loNPath))
// must use recursive approach since both index and
// ToLower(index) could exist. We must check both. if rb[0] != 0 {
if r == unicode.ToLower(index) { // old rune not finished
out, found := n.children[i].findCaseInsensitivePath(path, fixTrailingSlash) for i := 0; i < len(n.indices); i++ {
if found { if n.indices[i] == rb[0] {
return append(ciPath, out...), true // continue with child node
n = n.children[i]
loNPath = strings.ToLower(n.path)
continue walk
}
}
} else {
// process a new rune
var rv rune
// find rune start
// runes are up to 4 bytes long,
// -4 would definitely be another rune
var off int
for max := min(len(loNPath), 3); off < max; off++ {
if i := len(loNPath) - off; utf8.RuneStart(loOld[i]) {
// read rune from cached lowercase path
rv, _ = utf8.DecodeRuneInString(loOld[i:])
break
}
}
// calculate lowercase bytes of current rune
utf8.EncodeRune(rb[:], rv)
// skip already processed bytes
rb = shiftNRuneBytes(rb, off)
for i := 0; i < len(n.indices); i++ {
// lowercase matches
if n.indices[i] == rb[0] {
// must use a recursive approach since both the
// uppercase byte and the lowercase byte might exist
// as an index
if out, found := n.children[i].findCaseInsensitivePathRec(
path, loPath, ciPath, rb, fixTrailingSlash,
); found {
return out, true
}
break
}
}
// same for uppercase rune, if it differs
if up := unicode.ToUpper(rv); up != rv {
utf8.EncodeRune(rb[:], up)
rb = shiftNRuneBytes(rb, off)
for i := 0; i < len(n.indices); i++ {
// uppercase matches
if n.indices[i] == rb[0] {
// continue with child node
n = n.children[i]
loNPath = strings.ToLower(n.path)
continue walk
}
} }
} }
} }
// Nothing found. We can recommend to redirect to the same URL // Nothing found. We can recommend to redirect to the same URL
// without a trailing slash if a leaf exists for that path // without a trailing slash if a leaf exists for that path
found = (fixTrailingSlash && path == "/" && n.handle != nil) return ciPath, (fixTrailingSlash && path == "/" && n.handle != nil)
return
} }
n = n.children[0] n = n.children[0]
@ -484,8 +583,11 @@ func (n *node) findCaseInsensitivePath(path string, fixTrailingSlash bool) (ciPa
// we need to go deeper! // we need to go deeper!
if k < len(path) { if k < len(path) {
if len(n.children) > 0 { if len(n.children) > 0 {
path = path[k:] // continue with child node
n = n.children[0] n = n.children[0]
loNPath = strings.ToLower(n.path)
loPath = loPath[k:]
path = path[k:]
continue continue
} }
@ -493,7 +595,7 @@ func (n *node) findCaseInsensitivePath(path string, fixTrailingSlash bool) (ciPa
if fixTrailingSlash && len(path) == k+1 { if fixTrailingSlash && len(path) == k+1 {
return ciPath, true return ciPath, true
} }
return return ciPath, false
} }
if n.handle != nil { if n.handle != nil {
@ -506,7 +608,7 @@ func (n *node) findCaseInsensitivePath(path string, fixTrailingSlash bool) (ciPa
return append(ciPath, '/'), true return append(ciPath, '/'), true
} }
} }
return return ciPath, false
case catchAll: case catchAll:
return append(ciPath, path...), true return append(ciPath, path...), true
@ -531,11 +633,11 @@ func (n *node) findCaseInsensitivePath(path string, fixTrailingSlash bool) (ciPa
(n.nType == catchAll && n.children[0].handle != nil) { (n.nType == catchAll && n.children[0].handle != nil) {
return append(ciPath, '/'), true return append(ciPath, '/'), true
} }
return return ciPath, false
} }
} }
} }
return return ciPath, false
} }
} }
@ -545,11 +647,10 @@ func (n *node) findCaseInsensitivePath(path string, fixTrailingSlash bool) (ciPa
if path == "/" { if path == "/" {
return ciPath, true return ciPath, true
} }
if len(path)+1 == len(n.path) && n.path[len(path)] == '/' && if len(loPath)+1 == len(loNPath) && loNPath[len(loPath)] == '/' &&
strings.ToLower(path) == strings.ToLower(n.path[:len(path)]) && loPath[1:] == loNPath[1:len(loPath)] && n.handle != nil {
n.handle != nil {
return append(ciPath, n.path...), true return append(ciPath, n.path...), true
} }
} }
return return ciPath, false
} }

@ -8,6 +8,7 @@ import (
"fmt" "fmt"
"net/http" "net/http"
"reflect" "reflect"
"regexp"
"strings" "strings"
"testing" "testing"
) )
@ -22,7 +23,7 @@ func printChildren(n *node, prefix string) {
} }
} }
// Used as a workaround since we can't compare functions or their adresses // Used as a workaround since we can't compare functions or their addresses
var fakeHandlerValue string var fakeHandlerValue string
func fakeHandler(val string) Handle { func fakeHandler(val string) Handle {
@ -89,7 +90,7 @@ func checkMaxParams(t *testing.T, n *node) uint8 {
maxParams = params maxParams = params
} }
} }
if n.nType != static && !n.wildChild { if n.nType > root && !n.wildChild {
maxParams++ maxParams++
} }
@ -394,6 +395,9 @@ func TestTreeTrailingSlashRedirect(t *testing.T) {
"/1/:id/2", "/1/:id/2",
"/aa", "/aa",
"/a/", "/a/",
"/admin",
"/admin/:category",
"/admin/:category/:page",
"/doc", "/doc",
"/doc/go_faq.html", "/doc/go_faq.html",
"/doc/go1.html", "/doc/go1.html",
@ -423,6 +427,9 @@ func TestTreeTrailingSlashRedirect(t *testing.T) {
"/0/go/", "/0/go/",
"/1/go", "/1/go",
"/a", "/a",
"/admin/",
"/admin/config/",
"/admin/config/permissions/",
"/doc/", "/doc/",
} }
for _, route := range tsrRoutes { for _, route := range tsrRoutes {
@ -452,6 +459,24 @@ func TestTreeTrailingSlashRedirect(t *testing.T) {
} }
} }
func TestTreeRootTrailingSlashRedirect(t *testing.T) {
tree := &node{}
recv := catchPanic(func() {
tree.addRoute("/:test", fakeHandler("/:test"))
})
if recv != nil {
t.Fatalf("panic inserting test route: %v", recv)
}
handler, _, tsr := tree.getValue("/")
if handler != nil {
t.Fatalf("non-nil handler")
} else if tsr {
t.Errorf("expected no TSR recommendation")
}
}
func TestTreeFindCaseInsensitivePath(t *testing.T) { func TestTreeFindCaseInsensitivePath(t *testing.T) {
tree := &node{} tree := &node{}
@ -478,6 +503,16 @@ func TestTreeFindCaseInsensitivePath(t *testing.T) {
"/doc/go/away", "/doc/go/away",
"/no/a", "/no/a",
"/no/b", "/no/b",
"/Π",
"/u/apfêl/",
"/u/äpfêl/",
"/u/öpfêl",
"/v/Äpfêl/",
"/v/Öpfêl",
"/w/♬", // 3 byte
"/w/♭/", // 3 byte, last byte differs
"/w/𠜎", // 4 byte
"/w/𠜏/", // 4 byte
} }
for _, route := range routes { for _, route := range routes {
@ -556,6 +591,20 @@ func TestTreeFindCaseInsensitivePath(t *testing.T) {
{"/DOC/", "/doc", true, true}, {"/DOC/", "/doc", true, true},
{"/NO", "", false, true}, {"/NO", "", false, true},
{"/DOC/GO", "", false, true}, {"/DOC/GO", "", false, true},
{"/π", "/Π", true, false},
{"/π/", "/Π", true, true},
{"/u/ÄPFÊL/", "/u/äpfêl/", true, false},
{"/u/ÄPFÊL", "/u/äpfêl/", true, true},
{"/u/ÖPFÊL/", "/u/öpfêl", true, true},
{"/u/ÖPFÊL", "/u/öpfêl", true, false},
{"/v/äpfêL/", "/v/Äpfêl/", true, false},
{"/v/äpfêL", "/v/Äpfêl/", true, true},
{"/v/öpfêL/", "/v/Öpfêl", true, true},
{"/v/öpfêL", "/v/Öpfêl", true, false},
{"/w/♬/", "/w/♬", true, true},
{"/w/♭", "/w/♭/", true, true},
{"/w/𠜎/", "/w/𠜎", true, true},
{"/w/𠜏", "/w/𠜏/", true, true},
} }
// With fixTrailingSlash = true // With fixTrailingSlash = true
for _, test := range tests { for _, test := range tests {
@ -609,3 +658,42 @@ func TestTreeInvalidNodeType(t *testing.T) {
t.Fatalf("Expected panic '"+panicMsg+"', got '%v'", recv) t.Fatalf("Expected panic '"+panicMsg+"', got '%v'", recv)
} }
} }
func TestTreeWildcardConflictEx(t *testing.T) {
conflicts := [...]struct {
route string
segPath string
existPath string
existSegPath string
}{
{"/who/are/foo", "/foo", `/who/are/\*you`, `/\*you`},
{"/who/are/foo/", "/foo/", `/who/are/\*you`, `/\*you`},
{"/who/are/foo/bar", "/foo/bar", `/who/are/\*you`, `/\*you`},
{"/conxxx", "xxx", `/con:tact`, `:tact`},
{"/conooo/xxx", "ooo", `/con:tact`, `:tact`},
}
for _, conflict := range conflicts {
// I have to re-create a 'tree', because the 'tree' will be
// in an inconsistent state when the loop recovers from the
// panic thrown by the 'addRoute' function.
tree := &node{}
routes := [...]string{
"/con:tact",
"/who/are/*you",
"/who/foo/hello",
}
for _, route := range routes {
tree.addRoute(route, fakeHandler(route))
}
recv := catchPanic(func() {
tree.addRoute(conflict.route, fakeHandler(conflict.route))
})
if !regexp.MustCompile(fmt.Sprintf("'%s' in new path .* conflicts with existing wildcard '%s' in existing prefix '%s'", conflict.segPath, conflict.existSegPath, conflict.existPath)).MatchString(fmt.Sprint(recv)) {
t.Fatalf("invalid wildcard conflict error (%v)", recv)
}
}
}

@ -0,0 +1,30 @@
<!--
For bug reports, please provide the version or the Git commit hash for
which you have observed the behavior in question.
Note that we use GitHub issues for bugs and (uncontroversial) feature
requests (see below for details).
Please do *NOT* ask usage questions in GitHub issues. Usage questions make
most sense on the users mailing list, where more people are available to
potentially respond to your question, and the whole community can benefit
from the answers provided (perhaps your question has already been answered,
search the archive to find out):
https://groups.google.com/forum/#!forum/prometheus-users
While a GitHub issue is fine to track progress on an uncontroversial
feature request, many feature requests touch the best practices and
concepts of Prometheus as a whole and need to be discussed with the wider
developer community first. This is in particular true for a request to
reconsider a prior rejection of a feature request. Those overarching
discussions happen on the developer mailing list (GitHub issues, in
particular closed ones, are not tracked by the wider developer community
and thus inadequate):
https://groups.google.com/forum/#!forum/prometheus-developers
You can find more information at: https://prometheus.io/community/
-->

@ -7,6 +7,10 @@
_obj _obj
_test _test
# Examples
/examples/simple/simple
/examples/random/random
# Architecture specific extensions/prefixes # Architecture specific extensions/prefixes
*.[568vq] *.[568vq]
[568vq].out [568vq].out

@ -2,8 +2,12 @@ sudo: false
language: go language: go
go: go:
- 1.5.4 - 1.7.x # See README.md for current minimum version.
- 1.6.2 - 1.8.x
- 1.9.x
- 1.10.x
- 1.11.x
script: script:
- go test -short ./... - make check_license style unused test-short
- if [[ ! $TRAVIS_GO_VERSION =~ ^1\.(7|8|9)\.[x0-9]+$ ]]; then make staticcheck; fi

@ -1,18 +0,0 @@
The Prometheus project was started by Matt T. Proud (emeritus) and
Julius Volz in 2012.
Maintainers of this repository:
* Björn Rabenstein <beorn@soundcloud.com>
The following individuals have contributed code to this repository
(listed in alphabetical order):
* Bernerd Schaefer <bj.schaefer@gmail.com>
* Björn Rabenstein <beorn@soundcloud.com>
* Daniel Bornkessel <daniel@soundcloud.com>
* Jeff Younker <jeff@drinktomi.com>
* Julius Volz <julius.volz@gmail.com>
* Matt T. Proud <matt.proud@gmail.com>
* Tobias Schmidt <ts@soundcloud.com>

@ -1,3 +1,55 @@
## 0.9.0 / 2018-10-15
* [CHANGE] Go1.6 is no longer supported.
* [CHANGE] More refinements of the `Registry` consistency checks: Duplicated
labels are now detected, but inconsistent label dimensions are now allowed.
Collisions with the “magic” metric and label names in Summaries and
Histograms are detected now. #108 #417 #471
* [CHANGE] Changed `ProcessCollector` constructor. #219
* [CHANGE] Changed Go counter `go_memstats_heap_released_bytes_total` to gauge
`go_memstats_heap_released_bytes`. #229
* [CHANGE] Unexported `LabelPairSorter`. #453
* [CHANGE] Removed the `Untyped` metric from direct instrumentation. #340
* [CHANGE] Unexported `MetricVec`. #319
* [CHANGE] Removed deprecated `Set` method from `Counter` #247
* [CHANGE] Removed deprecated `RegisterOrGet` and `MustRegisterOrGet`. #247
* [CHANGE] API client: Introduced versioned packages.
* [FEATURE] A `Registerer` can be wrapped with prefixes and labels. #357
* [FEATURE] “Describe by collect” helper function. #239
* [FEATURE] Added package `testutil`. #58
* [FEATURE] Timestamp can be explicitly set for const metrics. #187
* [FEATURE] “Unchecked” collectors are possible now without cheating. #47
* [FEATURE] Pushing to the Pushgateway reworked in package `push` to support
many new features. (The old functions are still usable but deprecated.) #372
#341
* [FEATURE] Configurable connection limit for scrapes. #179
* [FEATURE] New HTTP middlewares to instrument `http.Handler` and
`http.RoundTripper`. The old middlewares and the pre-instrumented `/metrics`
handler are (strongly) deprecated. #316 #57 #101 #224
* [FEATURE] “Currying” for metric vectors. #320
* [FEATURE] A `Summary` can be created without quantiles. #118
* [FEATURE] Added a `Timer` helper type. #231
* [FEATURE] Added a Graphite bridge. #197
* [FEATURE] Help strings are now optional. #460
* [FEATURE] Added `process_virtual_memory_max_bytes` metric. #438 #440
* [FEATURE] Added `go_gc_cpu_fraction` and `go_threads` metrics. #281 #277
* [FEATURE] Added `promauto` package with auto-registering metrics. #385 #393
* [FEATURE] Add `SetToCurrentTime` method to `Gauge`. #259
* [FEATURE] API client: Add AlertManager, Status, and Target methods. #402
* [FEATURE] API client: Add admin methods. #398
* [FEATURE] API client: Support series API. #361
* [FEATURE] API client: Support querying label values.
* [ENHANCEMENT] Smarter creation of goroutines during scraping. Solves memory
usage spikes in certain situations. #369
* [ENHANCEMENT] Counters are now faster if dealing with integers only. #367
* [ENHANCEMENT] Improved label validation. #274 #335
* [BUGFIX] Creating a const metric with an invalid `Desc` returns an error. #460
* [BUGFIX] Histogram observations don't race any longer with exposition. #275
* [BUGFIX] Fixed goroutine leaks. #236 #472
* [BUGFIX] Fixed an error message for exponential histogram buckets. #467
* [BUGFIX] Fixed data race writing to the metric map. #401
* [BUGFIX] API client: Decode JSON on a 4xx response but do not on 204
responses. #476 #414
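Two of the features listed above, the `promauto` auto-registering constructors and the `promhttp` handler, can be illustrated with a short sketch; the metric name, help text, and port are placeholders, and the import paths assume the client_golang 0.9 package layout.

package main

import (
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promauto"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

// promauto.NewCounter registers the counter with the default registerer and
// panics if registration fails, so no explicit MustRegister call is needed.
var opsProcessed = promauto.NewCounter(prometheus.CounterOpts{
	Name: "myapp_processed_ops_total",
	Help: "The total number of processed operations.",
})

func main() {
	opsProcessed.Inc()

	// promhttp.Handler replaces the deprecated pre-instrumented /metrics handler.
	http.Handle("/metrics", promhttp.Handler())
	http.ListenAndServe(":2112", nil)
}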
## 0.8.0 / 2016-08-17 ## 0.8.0 / 2016-08-17
* [CHANGE] Registry is doing more consistency checks. This might break * [CHANGE] Registry is doing more consistency checks. This might break
existing setups that used to export inconsistent metrics. existing setups that used to export inconsistent metrics.

@ -2,9 +2,9 @@
Prometheus uses GitHub to manage reviews of pull requests. Prometheus uses GitHub to manage reviews of pull requests.
* If you have a trivial fix or improvement, go ahead and create a pull * If you have a trivial fix or improvement, go ahead and create a pull request,
request, addressing (with `@...`) one or more of the maintainers addressing (with `@...`) the maintainer of this repository (see
(see [AUTHORS.md](AUTHORS.md)) in the description of the pull request. [MAINTAINERS.md](MAINTAINERS.md)) in the description of the pull request.
* If you plan to do something more involved, first discuss your ideas * If you plan to do something more involved, first discuss your ideas
on our [mailing list](https://groups.google.com/forum/?fromgroups#!forum/prometheus-developers). on our [mailing list](https://groups.google.com/forum/?fromgroups#!forum/prometheus-developers).
@ -16,3 +16,5 @@ Prometheus uses GitHub to manage reviews of pull requests.
and the _Formatting and style_ section of Peter Bourgon's [Go: Best and the _Formatting and style_ section of Peter Bourgon's [Go: Best
Practices for Production Practices for Production
Environments](http://peter.bourgon.org/go-in-production/#formatting-and-style). Environments](http://peter.bourgon.org/go-in-production/#formatting-and-style).
* Be sure to sign off on the [DCO](https://github.com/probot/dco#how-it-works)

@ -0,0 +1,23 @@
# This Dockerfile builds an image for a client_golang example.
#
# Use as (from the root of the client_golang repository):
# docker build -f examples/$name/Dockerfile -t prometheus/golang-example-$name .
# Builder image, where we build the example.
FROM golang:1 AS builder
WORKDIR /go/src/github.com/prometheus/client_golang
COPY . .
WORKDIR /go/src/github.com/prometheus/client_golang/prometheus
RUN go get -d
WORKDIR /go/src/github.com/prometheus/client_golang/examples/random
RUN CGO_ENABLED=0 GOOS=linux go build -a -tags netgo -ldflags '-w'
WORKDIR /go/src/github.com/prometheus/client_golang/examples/simple
RUN CGO_ENABLED=0 GOOS=linux go build -a -tags netgo -ldflags '-w'
# Final image.
FROM prom/busybox
LABEL maintainer="The Prometheus Authors <prometheus-developers@googlegroups.com>"
COPY --from=builder /go/src/github.com/prometheus/client_golang/examples/random \
/go/src/github.com/prometheus/client_golang/examples/simple ./
EXPOSE 8080
CMD echo Please run an example. Either /random or /simple

@ -0,0 +1,2 @@
* Krasi Georgiev <kgeorgie@redhat.com> for `api/...`
* Björn Rabenstein <beorn@soundcloud.com> for everything else

@ -0,0 +1,32 @@
# Copyright 2018 The Prometheus Authors
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
include Makefile.common
# http.CloseNotifier is deprecated but we don't want to remove support
# from client_golang to not break anybody still using it.
STATICCHECK_IGNORE = \
github.com/prometheus/client_golang/prometheus/promhttp/delegator*.go:SA1019 \
github.com/prometheus/client_golang/prometheus/promhttp/instrument_server_test.go:SA1019 \
github.com/prometheus/client_golang/prometheus/http.go:SA1019
.PHONY: get_dep
get_dep:
@echo ">> getting dependencies"
$(GO) get -t ./...
.PHONY: test
test: get_dep common-test
.PHONY: test-short
test-short: get_dep common-test-short

@ -0,0 +1,132 @@
# Copyright 2018 The Prometheus Authors
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# A common Makefile that includes rules to be reused in different prometheus projects.
# !!! Open PRs only against the prometheus/prometheus/Makefile.common repository!
# Example usage :
# Create the main Makefile in the root project directory.
# include Makefile.common
# customTarget:
# @echo ">> Running customTarget"
#
# Ensure GOBIN is not set during build so that promu is installed to the correct path
unexport GOBIN
GO ?= go
GOFMT ?= $(GO)fmt
FIRST_GOPATH := $(firstword $(subst :, ,$(shell $(GO) env GOPATH)))
PROMU := $(FIRST_GOPATH)/bin/promu
STATICCHECK := $(FIRST_GOPATH)/bin/staticcheck
GOVENDOR := $(FIRST_GOPATH)/bin/govendor
pkgs = ./...
PREFIX ?= $(shell pwd)
BIN_DIR ?= $(shell pwd)
DOCKER_IMAGE_TAG ?= $(subst /,-,$(shell git rev-parse --abbrev-ref HEAD))
DOCKER_REPO ?= prom
.PHONY: all
all: style staticcheck unused build test
# This rule is used to forward a target like "build" to "common-build". This
# allows a new "build" target to be defined in a Makefile which includes this
# one and override "common-build" without override warnings.
%: common-% ;
.PHONY: common-style
common-style:
@echo ">> checking code style"
@fmtRes=$$($(GOFMT) -d $$(find . -path ./vendor -prune -o -name '*.go' -print)); \
if [ -n "$${fmtRes}" ]; then \
echo "gofmt checking failed!"; echo "$${fmtRes}"; echo; \
echo "Please ensure you are using $$($(GO) version) for formatting code."; \
exit 1; \
fi
.PHONY: common-check_license
common-check_license:
@echo ">> checking license header"
@licRes=$$(for file in $$(find . -type f -iname '*.go' ! -path './vendor/*') ; do \
awk 'NR<=3' $$file | grep -Eq "(Copyright|generated|GENERATED)" || echo $$file; \
done); \
if [ -n "$${licRes}" ]; then \
echo "license header checking failed:"; echo "$${licRes}"; \
exit 1; \
fi
.PHONY: common-test-short
common-test-short:
@echo ">> running short tests"
$(GO) test -short $(pkgs)
.PHONY: common-test
common-test:
@echo ">> running all tests"
$(GO) test -race $(pkgs)
.PHONY: common-format
common-format:
@echo ">> formatting code"
$(GO) fmt $(pkgs)
.PHONY: common-vet
common-vet:
@echo ">> vetting code"
$(GO) vet $(pkgs)
.PHONY: common-staticcheck
common-staticcheck: $(STATICCHECK)
@echo ">> running staticcheck"
$(STATICCHECK) -ignore "$(STATICCHECK_IGNORE)" $(pkgs)
.PHONY: common-unused
common-unused: $(GOVENDOR)
@echo ">> running check for unused packages"
@$(GOVENDOR) list +unused | grep . && exit 1 || echo 'No unused packages'
.PHONY: common-build
common-build: promu
@echo ">> building binaries"
$(PROMU) build --prefix $(PREFIX)
.PHONY: common-tarball
common-tarball: promu
@echo ">> building release tarball"
$(PROMU) tarball --prefix $(PREFIX) $(BIN_DIR)
.PHONY: common-docker
common-docker:
docker build -t "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(DOCKER_IMAGE_TAG)" .
.PHONY: common-docker-publish
common-docker-publish:
docker push "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)"
.PHONY: common-docker-tag-latest
common-docker-tag-latest:
docker tag "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(DOCKER_IMAGE_TAG)" "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):latest"
.PHONY: promu
promu:
GOOS= GOARCH= $(GO) get -u github.com/prometheus/promu
.PHONY: $(STATICCHECK)
$(STATICCHECK):
GOOS= GOARCH= $(GO) get -u honnef.co/go/tools/cmd/staticcheck
.PHONY: $(GOVENDOR)
$(GOVENDOR):
GOOS= GOARCH= $(GO) get -u github.com/kardianos/govendor

@ -1,12 +1,64 @@
# Prometheus Go client library # Prometheus Go client library
[![Build Status](https://travis-ci.org/prometheus/client_golang.svg?branch=master)](https://travis-ci.org/prometheus/client_golang) [![Build Status](https://travis-ci.org/prometheus/client_golang.svg?branch=master)](https://travis-ci.org/prometheus/client_golang)
[![Go Report Card](https://goreportcard.com/badge/github.com/prometheus/client_golang)](https://goreportcard.com/report/github.com/prometheus/client_golang)
[![go-doc](https://godoc.org/github.com/prometheus/client_golang?status.svg)](https://godoc.org/github.com/prometheus/client_golang)
This is the [Go](http://golang.org) client library for This is the [Go](http://golang.org) client library for
[Prometheus](http://prometheus.io). It has two separate parts, one for [Prometheus](http://prometheus.io). It has two separate parts, one for
instrumenting application code, and one for creating clients that talk to the instrumenting application code, and one for creating clients that talk to the
Prometheus HTTP API. Prometheus HTTP API.
__This library requires Go1.7 or later.__
## Important note about releases, versioning, tagging, and stability
While our goal is to follow [Semantic Versioning](https://semver.org/), this
repository is still pre-1.0.0. To quote the
[Semantic Versioning spec](https://semver.org/#spec-item-4): “Anything may
change at any time. The public API should not be considered stable.” We know
that this is at odds with the widespread use of this library. However, just
declaring something 1.0.0 doesn't make it 1.0.0. Instead, we are working
towards a 1.0.0 release that actually deserves its major version number.
Having said that, we aim to always keep the tip of master in a workable
state. We occasionally tag versions and track their changes in CHANGELOG.md,
but this happens mostly to keep dependency management tools happy and to give
people a handle they can easily refer to. In particular, all commits in the
master branch go through the same testing and review. There is no QA
process in place that would render tagged commits more stable or better tested
than others.
There is a plan behind the current (pre-1.0.0) versioning, though:
- v0.9 is the “production release”, currently tracked in the master
branch. “Patch” releases will usually be just bug fixes, indeed, but
important new features that do not require invasive code changes might also
be included in those. We do not plan any breaking changes from one v0.9.x
release to any later v0.9.y release, but nothing is guaranteed. Since the
master branch will eventually be switched over to track the upcoming v0.10
(see below), we recommend telling your dependency management tool of choice
to use the latest v0.9.x release, at least for your production software. In
that way, you should get bug fixes and non-invasive, low-risk new features
without the need to change anything on your part.
- v0.10 is a planned release that will have a _lot_ of breaking changes
(despite being only a “minor” release in the Semantic Versioning terminology,
but as said, pre-1.0.0 means nothing is guaranteed). Essentially, we have
been piling up feature requests that require breaking changes for a while,
and they are all collected in the
[v0.10 milestone](https://github.com/prometheus/client_golang/milestone/2).
Since there will be so many breaking changes, the development for v0.10 is
currently not happening in the master branch, but in the
[dev-0.10 branch](https://github.com/prometheus/client_golang/tree/dev-0.10).
It will violently change for a while, and it will definitely be in a
non-working state now and then. It should only be used for sneak peeks and
discussions of the new features and designs.
- Once v0.10 is ready for real-life use, it will be merged into the master
branch (which is the reason why you should lock your dependency management
tool to v0.9.x and only migrate to v0.10 when both you and v0.10 are ready
for it). In the ideal case, v0.10 will be the basis for the future v1.0
release, but we cannot provide an ETA at this time.
## Instrumenting applications ## Instrumenting applications
[![code-coverage](http://gocover.io/_badge/github.com/prometheus/client_golang/prometheus)](http://gocover.io/github.com/prometheus/client_golang/prometheus) [![go-doc](https://godoc.org/github.com/prometheus/client_golang/prometheus?status.svg)](https://godoc.org/github.com/prometheus/client_golang/prometheus) [![code-coverage](http://gocover.io/_badge/github.com/prometheus/client_golang/prometheus)](http://gocover.io/github.com/prometheus/client_golang/prometheus) [![go-doc](https://godoc.org/github.com/prometheus/client_golang/prometheus?status.svg)](https://godoc.org/github.com/prometheus/client_golang/prometheus)
@ -14,8 +66,8 @@ Prometheus HTTP API.
The The
[`prometheus` directory](https://github.com/prometheus/client_golang/tree/master/prometheus) [`prometheus` directory](https://github.com/prometheus/client_golang/tree/master/prometheus)
contains the instrumentation library. See the contains the instrumentation library. See the
[best practices section](http://prometheus.io/docs/practices/naming/) of the [guide](https://prometheus.io/docs/guides/go-application/) on the Prometheus
Prometheus documentation to learn more about instrumenting applications. website to learn more about instrumenting applications.
The The
[`examples` directory](https://github.com/prometheus/client_golang/tree/master/examples) [`examples` directory](https://github.com/prometheus/client_golang/tree/master/examples)
@ -23,13 +75,14 @@ contains simple examples of instrumented code.
## Client for the Prometheus HTTP API ## Client for the Prometheus HTTP API
[![code-coverage](http://gocover.io/_badge/github.com/prometheus/client_golang/api/prometheus)](http://gocover.io/github.com/prometheus/client_golang/api/prometheus) [![go-doc](https://godoc.org/github.com/prometheus/client_golang/api/prometheus?status.svg)](https://godoc.org/github.com/prometheus/client_golang/api/prometheus) [![code-coverage](http://gocover.io/_badge/github.com/prometheus/client_golang/api/prometheus/v1)](http://gocover.io/github.com/prometheus/client_golang/api/prometheus/v1) [![go-doc](https://godoc.org/github.com/prometheus/client_golang/api/prometheus?status.svg)](https://godoc.org/github.com/prometheus/client_golang/api)
The The
[`api/prometheus` directory](https://github.com/prometheus/client_golang/tree/master/api/prometheus) [`api/prometheus` directory](https://github.com/prometheus/client_golang/tree/master/api/prometheus)
contains the client for the contains the client for the
[Prometheus HTTP API](http://prometheus.io/docs/querying/api/). It allows you [Prometheus HTTP API](http://prometheus.io/docs/querying/api/). It allows you
to write Go applications that query time series data from a Prometheus server. to write Go applications that query time series data from a Prometheus
server. It is still in alpha stage.
## Where is `model`, `extraction`, and `text`? ## Where is `model`, `extraction`, and `text`?

@ -0,0 +1,131 @@
// Copyright 2015 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// +build go1.7
// Package api provides clients for the HTTP APIs.
package api
import (
"context"
"io/ioutil"
"net"
"net/http"
"net/url"
"path"
"strings"
"time"
)
// DefaultRoundTripper is used if no RoundTripper is set in Config.
var DefaultRoundTripper http.RoundTripper = &http.Transport{
Proxy: http.ProxyFromEnvironment,
DialContext: (&net.Dialer{
Timeout: 30 * time.Second,
KeepAlive: 30 * time.Second,
}).DialContext,
TLSHandshakeTimeout: 10 * time.Second,
}
// Config defines configuration parameters for a new client.
type Config struct {
// The address of the Prometheus to connect to.
Address string
// RoundTripper is used by the Client to drive HTTP requests. If not
// provided, DefaultRoundTripper will be used.
RoundTripper http.RoundTripper
}
func (cfg *Config) roundTripper() http.RoundTripper {
if cfg.RoundTripper == nil {
return DefaultRoundTripper
}
return cfg.RoundTripper
}
// Client is the interface for an API client.
type Client interface {
URL(ep string, args map[string]string) *url.URL
Do(context.Context, *http.Request) (*http.Response, []byte, error)
}
// NewClient returns a new Client.
//
// It is safe to use the returned Client from multiple goroutines.
func NewClient(cfg Config) (Client, error) {
u, err := url.Parse(cfg.Address)
if err != nil {
return nil, err
}
u.Path = strings.TrimRight(u.Path, "/")
return &httpClient{
endpoint: u,
client: http.Client{Transport: cfg.roundTripper()},
}, nil
}
type httpClient struct {
endpoint *url.URL
client http.Client
}
func (c *httpClient) URL(ep string, args map[string]string) *url.URL {
p := path.Join(c.endpoint.Path, ep)
for arg, val := range args {
arg = ":" + arg
p = strings.Replace(p, arg, val, -1)
}
u := *c.endpoint
u.Path = p
return &u
}
func (c *httpClient) Do(ctx context.Context, req *http.Request) (*http.Response, []byte, error) {
if ctx != nil {
req = req.WithContext(ctx)
}
resp, err := c.client.Do(req)
defer func() {
if resp != nil {
resp.Body.Close()
}
}()
if err != nil {
return nil, nil, err
}
var body []byte
done := make(chan struct{})
go func() {
body, err = ioutil.ReadAll(resp.Body)
close(done)
}()
select {
case <-ctx.Done():
err = resp.Body.Close()
<-done
if err == nil {
err = ctx.Err()
}
case <-done:
}
return resp, body, err
}
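A hedged usage sketch for the client defined above: NewClient, URL, and Do are the functions from this file, while the Prometheus address, the query expression, and the /api/v1/query endpoint path are placeholders for whatever server and query you actually target.

package main

import (
	"context"
	"fmt"
	"net/http"
	"time"

	"github.com/prometheus/client_golang/api"
)

func main() {
	client, err := api.NewClient(api.Config{Address: "http://localhost:9090"})
	if err != nil {
		panic(err)
	}

	// Build a request URL with the URL helper defined above.
	u := client.URL("/api/v1/query", nil)
	q := u.Query()
	q.Set("query", "up")
	u.RawQuery = q.Encode()

	req, _ := http.NewRequest(http.MethodGet, u.String(), nil)

	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	// Do returns the response, the already-read body, and any error.
	_, body, err := client.Do(ctx, req)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(body))
}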

@ -0,0 +1,115 @@
// Copyright 2015 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// +build go1.7
package api
import (
"net/http"
"net/url"
"testing"
)
func TestConfig(t *testing.T) {
c := Config{}
if c.roundTripper() != DefaultRoundTripper {
t.Fatalf("expected default roundtripper for nil RoundTripper field")
}
}
func TestClientURL(t *testing.T) {
tests := []struct {
address string
endpoint string
args map[string]string
expected string
}{
{
address: "http://localhost:9090",
endpoint: "/test",
expected: "http://localhost:9090/test",
},
{
address: "http://localhost",
endpoint: "/test",
expected: "http://localhost/test",
},
{
address: "http://localhost:9090",
endpoint: "test",
expected: "http://localhost:9090/test",
},
{
address: "http://localhost:9090/prefix",
endpoint: "/test",
expected: "http://localhost:9090/prefix/test",
},
{
address: "https://localhost:9090/",
endpoint: "/test/",
expected: "https://localhost:9090/test",
},
{
address: "http://localhost:9090",
endpoint: "/test/:param",
args: map[string]string{
"param": "content",
},
expected: "http://localhost:9090/test/content",
},
{
address: "http://localhost:9090",
endpoint: "/test/:param/more/:param",
args: map[string]string{
"param": "content",
},
expected: "http://localhost:9090/test/content/more/content",
},
{
address: "http://localhost:9090",
endpoint: "/test/:param/more/:foo",
args: map[string]string{
"param": "content",
"foo": "bar",
},
expected: "http://localhost:9090/test/content/more/bar",
},
{
address: "http://localhost:9090",
endpoint: "/test/:param",
args: map[string]string{
"nonexistent": "content",
},
expected: "http://localhost:9090/test/:param",
},
}
for _, test := range tests {
ep, err := url.Parse(test.address)
if err != nil {
t.Fatal(err)
}
hclient := &httpClient{
endpoint: ep,
client: http.Client{Transport: DefaultRoundTripper},
}
u := hclient.URL(test.endpoint, test.args)
if u.String() != test.expected {
t.Errorf("unexpected result: got %s, want %s", u, test.expected)
continue
}
}
}

@ -1,345 +0,0 @@
// Copyright 2015 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package prometheus provides bindings to the Prometheus HTTP API:
// http://prometheus.io/docs/querying/api/
package prometheus
import (
"encoding/json"
"fmt"
"io/ioutil"
"net"
"net/http"
"net/url"
"path"
"strconv"
"strings"
"time"
"github.com/prometheus/common/model"
"golang.org/x/net/context"
"golang.org/x/net/context/ctxhttp"
)
const (
statusAPIError = 422
apiPrefix = "/api/v1"
epQuery = "/query"
epQueryRange = "/query_range"
epLabelValues = "/label/:name/values"
epSeries = "/series"
)
type ErrorType string
const (
// The different API error types.
ErrBadData ErrorType = "bad_data"
ErrTimeout = "timeout"
ErrCanceled = "canceled"
ErrExec = "execution"
ErrBadResponse = "bad_response"
)
// Error is an error returned by the API.
type Error struct {
Type ErrorType
Msg string
}
func (e *Error) Error() string {
return fmt.Sprintf("%s: %s", e.Type, e.Msg)
}
// CancelableTransport is like net.Transport but provides
// per-request cancelation functionality.
type CancelableTransport interface {
http.RoundTripper
CancelRequest(req *http.Request)
}
var DefaultTransport CancelableTransport = &http.Transport{
Proxy: http.ProxyFromEnvironment,
Dial: (&net.Dialer{
Timeout: 30 * time.Second,
KeepAlive: 30 * time.Second,
}).Dial,
TLSHandshakeTimeout: 10 * time.Second,
}
// Config defines configuration parameters for a new client.
type Config struct {
// The address of the Prometheus to connect to.
Address string
// Transport is used by the Client to drive HTTP requests. If not
// provided, DefaultTransport will be used.
Transport CancelableTransport
}
func (cfg *Config) transport() CancelableTransport {
if cfg.Transport == nil {
return DefaultTransport
}
return cfg.Transport
}
type Client interface {
url(ep string, args map[string]string) *url.URL
do(context.Context, *http.Request) (*http.Response, []byte, error)
}
// New returns a new Client.
//
// It is safe to use the returned Client from multiple goroutines.
func New(cfg Config) (Client, error) {
u, err := url.Parse(cfg.Address)
if err != nil {
return nil, err
}
u.Path = strings.TrimRight(u.Path, "/") + apiPrefix
return &httpClient{
endpoint: u,
transport: cfg.transport(),
}, nil
}
type httpClient struct {
endpoint *url.URL
transport CancelableTransport
}
func (c *httpClient) url(ep string, args map[string]string) *url.URL {
p := path.Join(c.endpoint.Path, ep)
for arg, val := range args {
arg = ":" + arg
p = strings.Replace(p, arg, val, -1)
}
u := *c.endpoint
u.Path = p
return &u
}
func (c *httpClient) do(ctx context.Context, req *http.Request) (*http.Response, []byte, error) {
resp, err := ctxhttp.Do(ctx, &http.Client{Transport: c.transport}, req)
defer func() {
if resp != nil {
resp.Body.Close()
}
}()
if err != nil {
return nil, nil, err
}
var body []byte
done := make(chan struct{})
go func() {
body, err = ioutil.ReadAll(resp.Body)
close(done)
}()
select {
case <-ctx.Done():
err = resp.Body.Close()
<-done
if err == nil {
err = ctx.Err()
}
case <-done:
}
return resp, body, err
}
// apiClient wraps a regular client and processes successful API responses.
// Successful also includes responses that errored at the API level.
type apiClient struct {
Client
}
type apiResponse struct {
Status string `json:"status"`
Data json.RawMessage `json:"data"`
ErrorType ErrorType `json:"errorType"`
Error string `json:"error"`
}
func (c apiClient) do(ctx context.Context, req *http.Request) (*http.Response, []byte, error) {
resp, body, err := c.Client.do(ctx, req)
if err != nil {
return resp, body, err
}
code := resp.StatusCode
if code/100 != 2 && code != statusAPIError {
return resp, body, &Error{
Type: ErrBadResponse,
Msg: fmt.Sprintf("bad response code %d", resp.StatusCode),
}
}
var result apiResponse
if err = json.Unmarshal(body, &result); err != nil {
return resp, body, &Error{
Type: ErrBadResponse,
Msg: err.Error(),
}
}
if (code == statusAPIError) != (result.Status == "error") {
err = &Error{
Type: ErrBadResponse,
Msg: "inconsistent body for response code",
}
}
if code == statusAPIError && result.Status == "error" {
err = &Error{
Type: result.ErrorType,
Msg: result.Error,
}
}
return resp, []byte(result.Data), err
}
// Range represents a sliced time range.
type Range struct {
// The boundaries of the time range.
Start, End time.Time
// The maximum time between two slices within the boundaries.
Step time.Duration
}
// queryResult contains result data for a query.
type queryResult struct {
Type model.ValueType `json:"resultType"`
Result interface{} `json:"result"`
// The decoded value.
v model.Value
}
func (qr *queryResult) UnmarshalJSON(b []byte) error {
v := struct {
Type model.ValueType `json:"resultType"`
Result json.RawMessage `json:"result"`
}{}
err := json.Unmarshal(b, &v)
if err != nil {
return err
}
switch v.Type {
case model.ValScalar:
var sv model.Scalar
err = json.Unmarshal(v.Result, &sv)
qr.v = &sv
case model.ValVector:
var vv model.Vector
err = json.Unmarshal(v.Result, &vv)
qr.v = vv
case model.ValMatrix:
var mv model.Matrix
err = json.Unmarshal(v.Result, &mv)
qr.v = mv
default:
err = fmt.Errorf("unexpected value type %q", v.Type)
}
return err
}
// QueryAPI provides bindings the Prometheus's query API.
type QueryAPI interface {
// Query performs a query for the given time.
Query(ctx context.Context, query string, ts time.Time) (model.Value, error)
// QueryRange performs a query for the given range.
QueryRange(ctx context.Context, query string, r Range) (model.Value, error)
}
// NewQueryAPI returns a new QueryAPI for the client.
//
// It is safe to use the returned QueryAPI from multiple goroutines.
func NewQueryAPI(c Client) QueryAPI {
return &httpQueryAPI{client: apiClient{c}}
}
type httpQueryAPI struct {
client Client
}
func (h *httpQueryAPI) Query(ctx context.Context, query string, ts time.Time) (model.Value, error) {
u := h.client.url(epQuery, nil)
q := u.Query()
q.Set("query", query)
q.Set("time", ts.Format(time.RFC3339Nano))
u.RawQuery = q.Encode()
req, _ := http.NewRequest("GET", u.String(), nil)
_, body, err := h.client.do(ctx, req)
if err != nil {
return nil, err
}
var qres queryResult
err = json.Unmarshal(body, &qres)
return model.Value(qres.v), err
}
func (h *httpQueryAPI) QueryRange(ctx context.Context, query string, r Range) (model.Value, error) {
u := h.client.url(epQueryRange, nil)
q := u.Query()
var (
start = r.Start.Format(time.RFC3339Nano)
end = r.End.Format(time.RFC3339Nano)
step = strconv.FormatFloat(r.Step.Seconds(), 'f', 3, 64)
)
q.Set("query", query)
q.Set("start", start)
q.Set("end", end)
q.Set("step", step)
u.RawQuery = q.Encode()
req, _ := http.NewRequest("GET", u.String(), nil)
_, body, err := h.client.do(ctx, req)
if err != nil {
return nil, err
}
var qres queryResult
err = json.Unmarshal(body, &qres)
return model.Value(qres.v), err
}

@ -1,453 +0,0 @@
// Copyright 2015 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package prometheus
import (
"encoding/json"
"fmt"
"net/http"
"net/url"
"reflect"
"testing"
"time"
"github.com/prometheus/common/model"
"golang.org/x/net/context"
)
func TestConfig(t *testing.T) {
c := Config{}
if c.transport() != DefaultTransport {
t.Fatalf("expected default transport for nil Transport field")
}
}
func TestClientURL(t *testing.T) {
tests := []struct {
address string
endpoint string
args map[string]string
expected string
}{
{
address: "http://localhost:9090",
endpoint: "/test",
expected: "http://localhost:9090/test",
},
{
address: "http://localhost",
endpoint: "/test",
expected: "http://localhost/test",
},
{
address: "http://localhost:9090",
endpoint: "test",
expected: "http://localhost:9090/test",
},
{
address: "http://localhost:9090/prefix",
endpoint: "/test",
expected: "http://localhost:9090/prefix/test",
},
{
address: "https://localhost:9090/",
endpoint: "/test/",
expected: "https://localhost:9090/test",
},
{
address: "http://localhost:9090",
endpoint: "/test/:param",
args: map[string]string{
"param": "content",
},
expected: "http://localhost:9090/test/content",
},
{
address: "http://localhost:9090",
endpoint: "/test/:param/more/:param",
args: map[string]string{
"param": "content",
},
expected: "http://localhost:9090/test/content/more/content",
},
{
address: "http://localhost:9090",
endpoint: "/test/:param/more/:foo",
args: map[string]string{
"param": "content",
"foo": "bar",
},
expected: "http://localhost:9090/test/content/more/bar",
},
{
address: "http://localhost:9090",
endpoint: "/test/:param",
args: map[string]string{
"nonexistant": "content",
},
expected: "http://localhost:9090/test/:param",
},
}
for _, test := range tests {
ep, err := url.Parse(test.address)
if err != nil {
t.Fatal(err)
}
hclient := &httpClient{
endpoint: ep,
transport: DefaultTransport,
}
u := hclient.url(test.endpoint, test.args)
if u.String() != test.expected {
t.Errorf("unexpected result: got %s, want %s", u, test.expected)
continue
}
// The apiClient must return exactly the same result as the httpClient.
aclient := &apiClient{hclient}
u = aclient.url(test.endpoint, test.args)
if u.String() != test.expected {
t.Errorf("unexpected result: got %s, want %s", u, test.expected)
}
}
}
type testClient struct {
*testing.T
ch chan apiClientTest
req *http.Request
}
type apiClientTest struct {
code int
response interface{}
expected string
err *Error
}
func (c *testClient) url(ep string, args map[string]string) *url.URL {
return nil
}
func (c *testClient) do(ctx context.Context, req *http.Request) (*http.Response, []byte, error) {
if ctx == nil {
c.Fatalf("context was not passed down")
}
if req != c.req {
c.Fatalf("request was not passed down")
}
test := <-c.ch
var b []byte
var err error
switch v := test.response.(type) {
case string:
b = []byte(v)
default:
b, err = json.Marshal(v)
if err != nil {
c.Fatal(err)
}
}
resp := &http.Response{
StatusCode: test.code,
}
return resp, b, nil
}
func TestAPIClientDo(t *testing.T) {
tests := []apiClientTest{
{
response: &apiResponse{
Status: "error",
Data: json.RawMessage(`null`),
ErrorType: ErrBadData,
Error: "failed",
},
err: &Error{
Type: ErrBadData,
Msg: "failed",
},
code: statusAPIError,
expected: `null`,
},
{
response: &apiResponse{
Status: "error",
Data: json.RawMessage(`"test"`),
ErrorType: ErrTimeout,
Error: "timed out",
},
err: &Error{
Type: ErrTimeout,
Msg: "timed out",
},
code: statusAPIError,
expected: `test`,
},
{
response: "bad json",
err: &Error{
Type: ErrBadResponse,
Msg: "bad response code 400",
},
code: http.StatusBadRequest,
},
{
response: "bad json",
err: &Error{
Type: ErrBadResponse,
Msg: "invalid character 'b' looking for beginning of value",
},
code: statusAPIError,
},
{
response: &apiResponse{
Status: "success",
Data: json.RawMessage(`"test"`),
},
err: &Error{
Type: ErrBadResponse,
Msg: "inconsistent body for response code",
},
code: statusAPIError,
},
{
response: &apiResponse{
Status: "success",
Data: json.RawMessage(`"test"`),
ErrorType: ErrTimeout,
Error: "timed out",
},
err: &Error{
Type: ErrBadResponse,
Msg: "inconsistent body for response code",
},
code: statusAPIError,
},
{
response: &apiResponse{
Status: "error",
Data: json.RawMessage(`"test"`),
ErrorType: ErrTimeout,
Error: "timed out",
},
err: &Error{
Type: ErrBadResponse,
Msg: "inconsistent body for response code",
},
code: http.StatusOK,
},
}
tc := &testClient{
T: t,
ch: make(chan apiClientTest, 1),
req: &http.Request{},
}
client := &apiClient{tc}
for _, test := range tests {
tc.ch <- test
_, body, err := client.do(context.Background(), tc.req)
if test.err != nil {
if err == nil {
t.Errorf("expected error %q but got none", test.err)
continue
}
if test.err.Error() != err.Error() {
t.Errorf("unexpected error: want %q, got %q", test.err, err)
}
continue
}
if err != nil {
t.Errorf("unexpeceted error %s", err)
continue
}
want, got := test.expected, string(body)
if want != got {
t.Errorf("unexpected body: want %q, got %q", want, got)
}
}
}
type apiTestClient struct {
*testing.T
curTest apiTest
}
type apiTest struct {
do func() (interface{}, error)
inErr error
inRes interface{}
reqPath string
reqParam url.Values
reqMethod string
res interface{}
err error
}
func (c *apiTestClient) url(ep string, args map[string]string) *url.URL {
u := &url.URL{
Host: "test:9090",
Path: apiPrefix + ep,
}
return u
}
func (c *apiTestClient) do(ctx context.Context, req *http.Request) (*http.Response, []byte, error) {
test := c.curTest
if req.URL.Path != test.reqPath {
c.Errorf("unexpected request path: want %s, got %s", test.reqPath, req.URL.Path)
}
if req.Method != test.reqMethod {
c.Errorf("unexpected request method: want %s, got %s", test.reqMethod, req.Method)
}
b, err := json.Marshal(test.inRes)
if err != nil {
c.Fatal(err)
}
resp := &http.Response{}
if test.inErr != nil {
resp.StatusCode = statusAPIError
} else {
resp.StatusCode = http.StatusOK
}
return resp, b, test.inErr
}
func TestAPIs(t *testing.T) {
testTime := time.Now()
client := &apiTestClient{T: t}
queryApi := &httpQueryAPI{
client: client,
}
doQuery := func(q string, ts time.Time) func() (interface{}, error) {
return func() (interface{}, error) {
return queryApi.Query(context.Background(), q, ts)
}
}
doQueryRange := func(q string, rng Range) func() (interface{}, error) {
return func() (interface{}, error) {
return queryApi.QueryRange(context.Background(), q, rng)
}
}
queryTests := []apiTest{
{
do: doQuery("2", testTime),
inRes: &queryResult{
Type: model.ValScalar,
Result: &model.Scalar{
Value: 2,
Timestamp: model.TimeFromUnix(testTime.Unix()),
},
},
reqMethod: "GET",
reqPath: "/api/v1/query",
reqParam: url.Values{
"query": []string{"2"},
"time": []string{testTime.Format(time.RFC3339Nano)},
},
res: &model.Scalar{
Value: 2,
Timestamp: model.TimeFromUnix(testTime.Unix()),
},
},
{
do: doQuery("2", testTime),
inErr: fmt.Errorf("some error"),
reqMethod: "GET",
reqPath: "/api/v1/query",
reqParam: url.Values{
"query": []string{"2"},
"time": []string{testTime.Format(time.RFC3339Nano)},
},
err: fmt.Errorf("some error"),
},
{
do: doQueryRange("2", Range{
Start: testTime.Add(-time.Minute),
End: testTime,
Step: time.Minute,
}),
inErr: fmt.Errorf("some error"),
reqMethod: "GET",
reqPath: "/api/v1/query_range",
reqParam: url.Values{
"query": []string{"2"},
"start": []string{testTime.Add(-time.Minute).Format(time.RFC3339Nano)},
"end": []string{testTime.Format(time.RFC3339Nano)},
"step": []string{time.Minute.String()},
},
err: fmt.Errorf("some error"),
},
}
var tests []apiTest
tests = append(tests, queryTests...)
for _, test := range tests {
client.curTest = test
res, err := test.do()
if test.err != nil {
if err == nil {
t.Errorf("expected error %q but got none", test.err)
continue
}
if err.Error() != test.err.Error() {
t.Errorf("unexpected error: want %s, got %s", test.err, err)
}
continue
}
if err != nil {
t.Errorf("unexpected error: %s", err)
continue
}
if !reflect.DeepEqual(res, test.res) {
t.Errorf("unexpected result: want %v, got %v", test.res, res)
}
}
}

@ -0,0 +1,504 @@
// Copyright 2017 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// +build go1.7
// Package v1 provides bindings to the Prometheus HTTP API v1:
// http://prometheus.io/docs/querying/api/
package v1
import (
"context"
"encoding/json"
"fmt"
"net/http"
"strconv"
"time"
"github.com/prometheus/client_golang/api"
"github.com/prometheus/common/model"
)
const (
statusAPIError = 422
apiPrefix = "/api/v1"
epAlertManagers = apiPrefix + "/alertmanagers"
epQuery = apiPrefix + "/query"
epQueryRange = apiPrefix + "/query_range"
epLabelValues = apiPrefix + "/label/:name/values"
epSeries = apiPrefix + "/series"
epTargets = apiPrefix + "/targets"
epSnapshot = apiPrefix + "/admin/tsdb/snapshot"
epDeleteSeries = apiPrefix + "/admin/tsdb/delete_series"
epCleanTombstones = apiPrefix + "/admin/tsdb/clean_tombstones"
epConfig = apiPrefix + "/status/config"
epFlags = apiPrefix + "/status/flags"
)
// ErrorType models the different API error types.
type ErrorType string
// HealthStatus models the health status of a scrape target.
type HealthStatus string
const (
// Possible values for ErrorType.
ErrBadData ErrorType = "bad_data"
ErrTimeout = "timeout"
ErrCanceled = "canceled"
ErrExec = "execution"
ErrBadResponse = "bad_response"
// Possible values for HealthStatus.
HealthGood HealthStatus = "up"
HealthUnknown HealthStatus = "unknown"
HealthBad HealthStatus = "down"
)
// Error is an error returned by the API.
type Error struct {
Type ErrorType
Msg string
}
func (e *Error) Error() string {
return fmt.Sprintf("%s: %s", e.Type, e.Msg)
}
// Range represents a sliced time range.
type Range struct {
// The boundaries of the time range.
Start, End time.Time
// The maximum time between two slices within the boundaries.
Step time.Duration
}
// API provides bindings for Prometheus's v1 API.
type API interface {
// AlertManagers returns an overview of the current state of the Prometheus alert manager discovery.
AlertManagers(ctx context.Context) (AlertManagersResult, error)
// CleanTombstones removes the deleted data from disk and cleans up the existing tombstones.
CleanTombstones(ctx context.Context) error
// Config returns the current Prometheus configuration.
Config(ctx context.Context) (ConfigResult, error)
// DeleteSeries deletes data for a selection of series in a time range.
DeleteSeries(ctx context.Context, matches []string, startTime time.Time, endTime time.Time) error
// Flags returns the flag values that Prometheus was launched with.
Flags(ctx context.Context) (FlagsResult, error)
// LabelValues performs a query for the values of the given label.
LabelValues(ctx context.Context, label string) (model.LabelValues, error)
// Query performs a query for the given time.
Query(ctx context.Context, query string, ts time.Time) (model.Value, error)
// QueryRange performs a query for the given range.
QueryRange(ctx context.Context, query string, r Range) (model.Value, error)
// Series finds series by label matchers.
Series(ctx context.Context, matches []string, startTime time.Time, endTime time.Time) ([]model.LabelSet, error)
// Snapshot creates a snapshot of all current data into snapshots/<datetime>-<rand>
// under the TSDB's data directory and returns the directory as response.
Snapshot(ctx context.Context, skipHead bool) (SnapshotResult, error)
// Targets returns an overview of the current state of the Prometheus target discovery.
Targets(ctx context.Context) (TargetsResult, error)
}
// AlertManagersResult contains the result from querying the alertmanagers endpoint.
type AlertManagersResult struct {
Active []AlertManager `json:"activeAlertManagers"`
Dropped []AlertManager `json:"droppedAlertManagers"`
}
// AlertManager models a configured Alert Manager.
type AlertManager struct {
URL string `json:"url"`
}
// ConfigResult contains the result from querying the config endpoint.
type ConfigResult struct {
YAML string `json:"yaml"`
}
// FlagsResult contains the result from querying the flag endpoint.
type FlagsResult map[string]string
// SnapshotResult contains the result from querying the snapshot endpoint.
type SnapshotResult struct {
Name string `json:"name"`
}
// TargetsResult contains the result from querying the targets endpoint.
type TargetsResult struct {
Active []ActiveTarget `json:"activeTargets"`
Dropped []DroppedTarget `json:"droppedTargets"`
}
// ActiveTarget models an active Prometheus scrape target.
type ActiveTarget struct {
DiscoveredLabels model.LabelSet `json:"discoveredLabels"`
Labels model.LabelSet `json:"labels"`
ScrapeURL string `json:"scrapeUrl"`
LastError string `json:"lastError"`
LastScrape time.Time `json:"lastScrape"`
Health HealthStatus `json:"health"`
}
// DroppedTarget models a dropped Prometheus scrape target.
type DroppedTarget struct {
DiscoveredLabels model.LabelSet `json:"discoveredLabels"`
}
// queryResult contains result data for a query.
type queryResult struct {
Type model.ValueType `json:"resultType"`
Result interface{} `json:"result"`
// The decoded value.
v model.Value
}
func (qr *queryResult) UnmarshalJSON(b []byte) error {
v := struct {
Type model.ValueType `json:"resultType"`
Result json.RawMessage `json:"result"`
}{}
err := json.Unmarshal(b, &v)
if err != nil {
return err
}
switch v.Type {
case model.ValScalar:
var sv model.Scalar
err = json.Unmarshal(v.Result, &sv)
qr.v = &sv
case model.ValVector:
var vv model.Vector
err = json.Unmarshal(v.Result, &vv)
qr.v = vv
case model.ValMatrix:
var mv model.Matrix
err = json.Unmarshal(v.Result, &mv)
qr.v = mv
default:
err = fmt.Errorf("unexpected value type %q", v.Type)
}
return err
}
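// The decoded model.Value can then be inspected with a type switch that
// mirrors the cases above (sketch; val would come from Query or QueryRange):
//
//	switch v := val.(type) {
//	case *model.Scalar:
//		fmt.Println("scalar:", v.Value)
//	case model.Vector:
//		for _, s := range v {
//			fmt.Println(s.Metric, s.Value)
//		}
//	case model.Matrix:
//		for _, ss := range v {
//			fmt.Println(ss.Metric, "has", len(ss.Values), "samples")
//		}
//	}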
// NewAPI returns a new API for the client.
//
// It is safe to use the returned API from multiple goroutines.
func NewAPI(c api.Client) API {
return &httpAPI{client: apiClient{c}}
}
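// Wiring sketch (assumes api.NewClient and api.Config from
// github.com/prometheus/client_golang/api; the address is a placeholder and
// error handling is elided):
//
//	client, err := api.NewClient(api.Config{Address: "http://localhost:9090"})
//	if err != nil {
//		// handle error
//	}
//	v1api := NewAPI(client)
//	val, err := v1api.Query(context.Background(), "up", time.Now())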
type httpAPI struct {
client api.Client
}
func (h *httpAPI) AlertManagers(ctx context.Context) (AlertManagersResult, error) {
u := h.client.URL(epAlertManagers, nil)
req, err := http.NewRequest(http.MethodGet, u.String(), nil)
if err != nil {
return AlertManagersResult{}, err
}
_, body, err := h.client.Do(ctx, req)
if err != nil {
return AlertManagersResult{}, err
}
var res AlertManagersResult
err = json.Unmarshal(body, &res)
return res, err
}
func (h *httpAPI) CleanTombstones(ctx context.Context) error {
u := h.client.URL(epCleanTombstones, nil)
req, err := http.NewRequest(http.MethodPost, u.String(), nil)
if err != nil {
return err
}
_, _, err = h.client.Do(ctx, req)
return err
}
func (h *httpAPI) Config(ctx context.Context) (ConfigResult, error) {
u := h.client.URL(epConfig, nil)
req, err := http.NewRequest(http.MethodGet, u.String(), nil)
if err != nil {
return ConfigResult{}, err
}
_, body, err := h.client.Do(ctx, req)
if err != nil {
return ConfigResult{}, err
}
var res ConfigResult
err = json.Unmarshal(body, &res)
return res, err
}
func (h *httpAPI) DeleteSeries(ctx context.Context, matches []string, startTime time.Time, endTime time.Time) error {
u := h.client.URL(epDeleteSeries, nil)
q := u.Query()
for _, m := range matches {
q.Add("match[]", m)
}
q.Set("start", startTime.Format(time.RFC3339Nano))
q.Set("end", endTime.Format(time.RFC3339Nano))
u.RawQuery = q.Encode()
req, err := http.NewRequest(http.MethodPost, u.String(), nil)
if err != nil {
return err
}
_, _, err = h.client.Do(ctx, req)
return err
}
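// Admin-endpoint sketch (v1api, ctx, start, and end are placeholders; the
// matcher is illustrative, and these endpoints typically require the TSDB
// admin API to be enabled on the Prometheus server):
//
//	err := v1api.DeleteSeries(ctx, []string{`up{job="prometheus"}`}, start, end)
//	if err == nil {
//		err = v1api.CleanTombstones(ctx)
//	}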
func (h *httpAPI) Flags(ctx context.Context) (FlagsResult, error) {
u := h.client.URL(epFlags, nil)
req, err := http.NewRequest(http.MethodGet, u.String(), nil)
if err != nil {
return FlagsResult{}, err
}
_, body, err := h.client.Do(ctx, req)
if err != nil {
return FlagsResult{}, err
}
var res FlagsResult
err = json.Unmarshal(body, &res)
return res, err
}
func (h *httpAPI) LabelValues(ctx context.Context, label string) (model.LabelValues, error) {
u := h.client.URL(epLabelValues, map[string]string{"name": label})
req, err := http.NewRequest(http.MethodGet, u.String(), nil)
if err != nil {
return nil, err
}
_, body, err := h.client.Do(ctx, req)
if err != nil {
return nil, err
}
var labelValues model.LabelValues
err = json.Unmarshal(body, &labelValues)
return labelValues, err
}
func (h *httpAPI) Query(ctx context.Context, query string, ts time.Time) (model.Value, error) {
u := h.client.URL(epQuery, nil)
q := u.Query()
q.Set("query", query)
if !ts.IsZero() {
q.Set("time", ts.Format(time.RFC3339Nano))
}
u.RawQuery = q.Encode()
req, err := http.NewRequest(http.MethodGet, u.String(), nil)
if err != nil {
return nil, err
}
_, body, err := h.client.Do(ctx, req)
if err != nil {
return nil, err
}
var qres queryResult
err = json.Unmarshal(body, &qres)
return model.Value(qres.v), err
}
func (h *httpAPI) QueryRange(ctx context.Context, query string, r Range) (model.Value, error) {
u := h.client.URL(epQueryRange, nil)
q := u.Query()
var (
start = r.Start.Format(time.RFC3339Nano)
end = r.End.Format(time.RFC3339Nano)
step = strconv.FormatFloat(r.Step.Seconds(), 'f', 3, 64)
)
q.Set("query", query)
q.Set("start", start)
q.Set("end", end)
q.Set("step", step)
u.RawQuery = q.Encode()
req, err := http.NewRequest(http.MethodGet, u.String(), nil)
if err != nil {
return nil, err
}
_, body, err := h.client.Do(ctx, req)
if err != nil {
return nil, err
}
var qres queryResult
err = json.Unmarshal(body, &qres)
return model.Value(qres.v), err
}
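// Ranged-query sketch (v1api and ctx are placeholders; the expression is
// illustrative): query the last hour at one-minute resolution.
//
//	r := Range{
//		Start: time.Now().Add(-time.Hour),
//		End:   time.Now(),
//		Step:  time.Minute,
//	}
//	val, err := v1api.QueryRange(ctx, "rate(http_requests_total[5m])", r)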
func (h *httpAPI) Series(ctx context.Context, matches []string, startTime time.Time, endTime time.Time) ([]model.LabelSet, error) {
u := h.client.URL(epSeries, nil)
q := u.Query()
for _, m := range matches {
q.Add("match[]", m)
}
q.Set("start", startTime.Format(time.RFC3339Nano))
q.Set("end", endTime.Format(time.RFC3339Nano))
u.RawQuery = q.Encode()
req, err := http.NewRequest(http.MethodGet, u.String(), nil)
if err != nil {
return nil, err
}
_, body, err := h.client.Do(ctx, req)
if err != nil {
return nil, err
}
var mset []model.LabelSet
err = json.Unmarshal(body, &mset)
return mset, err
}
func (h *httpAPI) Snapshot(ctx context.Context, skipHead bool) (SnapshotResult, error) {
u := h.client.URL(epSnapshot, nil)
q := u.Query()
q.Set("skip_head", strconv.FormatBool(skipHead))
u.RawQuery = q.Encode()
req, err := http.NewRequest(http.MethodPost, u.String(), nil)
if err != nil {
return SnapshotResult{}, err
}
_, body, err := h.client.Do(ctx, req)
if err != nil {
return SnapshotResult{}, err
}
var res SnapshotResult
err = json.Unmarshal(body, &res)
return res, err
}
func (h *httpAPI) Targets(ctx context.Context) (TargetsResult, error) {
u := h.client.URL(epTargets, nil)
req, err := http.NewRequest(http.MethodGet, u.String(), nil)
if err != nil {
return TargetsResult{}, err
}
_, body, err := h.client.Do(ctx, req)
if err != nil {
return TargetsResult{}, err
}
var res TargetsResult
err = json.Unmarshal(body, &res)
return res, err
}
// apiClient wraps a regular client and processes successful API responses.
// "Successful" here also includes responses that errored at the API level.
type apiClient struct {
api.Client
}
type apiResponse struct {
Status string `json:"status"`
Data json.RawMessage `json:"data"`
ErrorType ErrorType `json:"errorType"`
Error string `json:"error"`
}
func apiError(code int) bool {
// These are the codes that Prometheus sends when it returns an error.
return code == statusAPIError || code == http.StatusBadRequest
}
func (c apiClient) Do(ctx context.Context, req *http.Request) (*http.Response, []byte, error) {
resp, body, err := c.Client.Do(ctx, req)
if err != nil {
return resp, body, err
}
code := resp.StatusCode
if code/100 != 2 && !apiError(code) {
return resp, body, &Error{
Type: ErrBadResponse,
Msg: fmt.Sprintf("bad response code %d", resp.StatusCode),
}
}
var result apiResponse
if http.StatusNoContent != code {
if err = json.Unmarshal(body, &result); err != nil {
return resp, body, &Error{
Type: ErrBadResponse,
Msg: err.Error(),
}
}
}
if apiError(code) != (result.Status == "error") {
err = &Error{
Type: ErrBadResponse,
Msg: "inconsistent body for response code",
}
}
if apiError(code) && result.Status == "error" {
err = &Error{
Type: result.ErrorType,
Msg: result.Error,
}
}
return resp, []byte(result.Data), err
}

@ -0,0 +1,706 @@
// Copyright 2017 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// +build go1.7
package v1
import (
"context"
"encoding/json"
"fmt"
"net/http"
"net/url"
"reflect"
"strings"
"testing"
"time"
"github.com/prometheus/common/model"
)
type apiTest struct {
do func() (interface{}, error)
inErr error
inRes interface{}
reqPath string
reqParam url.Values
reqMethod string
res interface{}
err error
}
type apiTestClient struct {
*testing.T
curTest apiTest
}
func (c *apiTestClient) URL(ep string, args map[string]string) *url.URL {
path := ep
for k, v := range args {
path = strings.Replace(path, ":"+k, v, -1)
}
u := &url.URL{
Host: "test:9090",
Path: path,
}
return u
}
func (c *apiTestClient) Do(ctx context.Context, req *http.Request) (*http.Response, []byte, error) {
test := c.curTest
if req.URL.Path != test.reqPath {
c.Errorf("unexpected request path: want %s, got %s", test.reqPath, req.URL.Path)
}
if req.Method != test.reqMethod {
c.Errorf("unexpected request method: want %s, got %s", test.reqMethod, req.Method)
}
b, err := json.Marshal(test.inRes)
if err != nil {
c.Fatal(err)
}
resp := &http.Response{}
if test.inErr != nil {
resp.StatusCode = statusAPIError
} else {
resp.StatusCode = http.StatusOK
}
return resp, b, test.inErr
}
func TestAPIs(t *testing.T) {
testTime := time.Now()
client := &apiTestClient{T: t}
promAPI := &httpAPI{
client: client,
}
doAlertManagers := func() func() (interface{}, error) {
return func() (interface{}, error) {
return promAPI.AlertManagers(context.Background())
}
}
doCleanTombstones := func() func() (interface{}, error) {
return func() (interface{}, error) {
return nil, promAPI.CleanTombstones(context.Background())
}
}
doConfig := func() func() (interface{}, error) {
return func() (interface{}, error) {
return promAPI.Config(context.Background())
}
}
doDeleteSeries := func(matcher string, startTime time.Time, endTime time.Time) func() (interface{}, error) {
return func() (interface{}, error) {
return nil, promAPI.DeleteSeries(context.Background(), []string{matcher}, startTime, endTime)
}
}
doFlags := func() func() (interface{}, error) {
return func() (interface{}, error) {
return promAPI.Flags(context.Background())
}
}
doLabelValues := func(label string) func() (interface{}, error) {
return func() (interface{}, error) {
return promAPI.LabelValues(context.Background(), label)
}
}
doQuery := func(q string, ts time.Time) func() (interface{}, error) {
return func() (interface{}, error) {
return promAPI.Query(context.Background(), q, ts)
}
}
doQueryRange := func(q string, rng Range) func() (interface{}, error) {
return func() (interface{}, error) {
return promAPI.QueryRange(context.Background(), q, rng)
}
}
doSeries := func(matcher string, startTime time.Time, endTime time.Time) func() (interface{}, error) {
return func() (interface{}, error) {
return promAPI.Series(context.Background(), []string{matcher}, startTime, endTime)
}
}
doSnapshot := func(skipHead bool) func() (interface{}, error) {
return func() (interface{}, error) {
return promAPI.Snapshot(context.Background(), skipHead)
}
}
doTargets := func() func() (interface{}, error) {
return func() (interface{}, error) {
return promAPI.Targets(context.Background())
}
}
queryTests := []apiTest{
{
do: doQuery("2", testTime),
inRes: &queryResult{
Type: model.ValScalar,
Result: &model.Scalar{
Value: 2,
Timestamp: model.TimeFromUnix(testTime.Unix()),
},
},
reqMethod: "GET",
reqPath: "/api/v1/query",
reqParam: url.Values{
"query": []string{"2"},
"time": []string{testTime.Format(time.RFC3339Nano)},
},
res: &model.Scalar{
Value: 2,
Timestamp: model.TimeFromUnix(testTime.Unix()),
},
},
{
do: doQuery("2", testTime),
inErr: fmt.Errorf("some error"),
reqMethod: "GET",
reqPath: "/api/v1/query",
reqParam: url.Values{
"query": []string{"2"},
"time": []string{testTime.Format(time.RFC3339Nano)},
},
err: fmt.Errorf("some error"),
},
{
do: doQueryRange("2", Range{
Start: testTime.Add(-time.Minute),
End: testTime,
Step: time.Minute,
}),
inErr: fmt.Errorf("some error"),
reqMethod: "GET",
reqPath: "/api/v1/query_range",
reqParam: url.Values{
"query": []string{"2"},
"start": []string{testTime.Add(-time.Minute).Format(time.RFC3339Nano)},
"end": []string{testTime.Format(time.RFC3339Nano)},
"step": []string{time.Minute.String()},
},
err: fmt.Errorf("some error"),
},
{
do: doLabelValues("mylabel"),
inRes: []string{"val1", "val2"},
reqMethod: "GET",
reqPath: "/api/v1/label/mylabel/values",
res: model.LabelValues{"val1", "val2"},
},
{
do: doLabelValues("mylabel"),
inErr: fmt.Errorf("some error"),
reqMethod: "GET",
reqPath: "/api/v1/label/mylabel/values",
err: fmt.Errorf("some error"),
},
{
do: doSeries("up", testTime.Add(-time.Minute), testTime),
inRes: []map[string]string{
{
"__name__": "up",
"job": "prometheus",
"instance": "localhost:9090"},
},
reqMethod: "GET",
reqPath: "/api/v1/series",
reqParam: url.Values{
"match": []string{"up"},
"start": []string{testTime.Add(-time.Minute).Format(time.RFC3339Nano)},
"end": []string{testTime.Format(time.RFC3339Nano)},
},
res: []model.LabelSet{
model.LabelSet{
"__name__": "up",
"job": "prometheus",
"instance": "localhost:9090",
},
},
},
{
do: doSeries("up", testTime.Add(-time.Minute), testTime),
inErr: fmt.Errorf("some error"),
reqMethod: "GET",
reqPath: "/api/v1/series",
reqParam: url.Values{
"match": []string{"up"},
"start": []string{testTime.Add(-time.Minute).Format(time.RFC3339Nano)},
"end": []string{testTime.Format(time.RFC3339Nano)},
},
err: fmt.Errorf("some error"),
},
{
do: doSnapshot(true),
inRes: map[string]string{
"name": "20171210T211224Z-2be650b6d019eb54",
},
reqMethod: "POST",
reqPath: "/api/v1/admin/tsdb/snapshot",
reqParam: url.Values{
"skip_head": []string{"true"},
},
res: SnapshotResult{
Name: "20171210T211224Z-2be650b6d019eb54",
},
},
{
do: doSnapshot(true),
inErr: fmt.Errorf("some error"),
reqMethod: "POST",
reqPath: "/api/v1/admin/tsdb/snapshot",
err: fmt.Errorf("some error"),
},
{
do: doCleanTombstones(),
reqMethod: "POST",
reqPath: "/api/v1/admin/tsdb/clean_tombstones",
},
{
do: doCleanTombstones(),
inErr: fmt.Errorf("some error"),
reqMethod: "POST",
reqPath: "/api/v1/admin/tsdb/clean_tombstones",
err: fmt.Errorf("some error"),
},
{
do: doDeleteSeries("up", testTime.Add(-time.Minute), testTime),
inRes: []map[string]string{
{
"__name__": "up",
"job": "prometheus",
"instance": "localhost:9090"},
},
reqMethod: "POST",
reqPath: "/api/v1/admin/tsdb/delete_series",
reqParam: url.Values{
"match": []string{"up"},
"start": []string{testTime.Add(-time.Minute).Format(time.RFC3339Nano)},
"end": []string{testTime.Format(time.RFC3339Nano)},
},
},
{
do: doDeleteSeries("up", testTime.Add(-time.Minute), testTime),
inErr: fmt.Errorf("some error"),
reqMethod: "POST",
reqPath: "/api/v1/admin/tsdb/delete_series",
reqParam: url.Values{
"match": []string{"up"},
"start": []string{testTime.Add(-time.Minute).Format(time.RFC3339Nano)},
"end": []string{testTime.Format(time.RFC3339Nano)},
},
err: fmt.Errorf("some error"),
},
{
do: doConfig(),
reqMethod: "GET",
reqPath: "/api/v1/status/config",
inRes: map[string]string{
"yaml": "<content of the loaded config file in YAML>",
},
res: ConfigResult{
YAML: "<content of the loaded config file in YAML>",
},
},
{
do: doConfig(),
reqMethod: "GET",
reqPath: "/api/v1/status/config",
inErr: fmt.Errorf("some error"),
err: fmt.Errorf("some error"),
},
{
do: doFlags(),
reqMethod: "GET",
reqPath: "/api/v1/status/flags",
inRes: map[string]string{
"alertmanager.notification-queue-capacity": "10000",
"alertmanager.timeout": "10s",
"log.level": "info",
"query.lookback-delta": "5m",
"query.max-concurrency": "20",
},
res: FlagsResult{
"alertmanager.notification-queue-capacity": "10000",
"alertmanager.timeout": "10s",
"log.level": "info",
"query.lookback-delta": "5m",
"query.max-concurrency": "20",
},
},
{
do: doFlags(),
reqMethod: "GET",
reqPath: "/api/v1/status/flags",
inErr: fmt.Errorf("some error"),
err: fmt.Errorf("some error"),
},
{
do: doAlertManagers(),
reqMethod: "GET",
reqPath: "/api/v1/alertmanagers",
inRes: map[string]interface{}{
"activeAlertManagers": []map[string]string{
{
"url": "http://127.0.0.1:9091/api/v1/alerts",
},
},
"droppedAlertManagers": []map[string]string{
{
"url": "http://127.0.0.1:9092/api/v1/alerts",
},
},
},
res: AlertManagersResult{
Active: []AlertManager{
{
URL: "http://127.0.0.1:9091/api/v1/alerts",
},
},
Dropped: []AlertManager{
{
URL: "http://127.0.0.1:9092/api/v1/alerts",
},
},
},
},
{
do: doAlertManagers(),
reqMethod: "GET",
reqPath: "/api/v1/alertmanagers",
inErr: fmt.Errorf("some error"),
err: fmt.Errorf("some error"),
},
{
do: doTargets(),
reqMethod: "GET",
reqPath: "/api/v1/targets",
inRes: map[string]interface{}{
"activeTargets": []map[string]interface{}{
{
"discoveredLabels": map[string]string{
"__address__": "127.0.0.1:9090",
"__metrics_path__": "/metrics",
"__scheme__": "http",
"job": "prometheus",
},
"labels": map[string]string{
"instance": "127.0.0.1:9090",
"job": "prometheus",
},
"scrapeUrl": "http://127.0.0.1:9090",
"lastError": "error while scraping target",
"lastScrape": testTime.UTC().Format(time.RFC3339Nano),
"health": "up",
},
},
"droppedTargets": []map[string]interface{}{
{
"discoveredLabels": map[string]string{
"__address__": "127.0.0.1:9100",
"__metrics_path__": "/metrics",
"__scheme__": "http",
"job": "node",
},
},
},
},
res: TargetsResult{
Active: []ActiveTarget{
{
DiscoveredLabels: model.LabelSet{
"__address__": "127.0.0.1:9090",
"__metrics_path__": "/metrics",
"__scheme__": "http",
"job": "prometheus",
},
Labels: model.LabelSet{
"instance": "127.0.0.1:9090",
"job": "prometheus",
},
ScrapeURL: "http://127.0.0.1:9090",
LastError: "error while scraping target",
LastScrape: testTime.UTC(),
Health: HealthGood,
},
},
Dropped: []DroppedTarget{
{
DiscoveredLabels: model.LabelSet{
"__address__": "127.0.0.1:9100",
"__metrics_path__": "/metrics",
"__scheme__": "http",
"job": "node",
},
},
},
},
},
{
do: doTargets(),
reqMethod: "GET",
reqPath: "/api/v1/targets",
inErr: fmt.Errorf("some error"),
err: fmt.Errorf("some error"),
},
}
var tests []apiTest
tests = append(tests, queryTests...)
for _, test := range tests {
client.curTest = test
res, err := test.do()
if test.err != nil {
if err == nil {
t.Errorf("expected error %q but got none", test.err)
continue
}
if err.Error() != test.err.Error() {
t.Errorf("unexpected error: want %s, got %s", test.err, err)
}
continue
}
if err != nil {
t.Errorf("unexpected error: %s", err)
continue
}
if !reflect.DeepEqual(res, test.res) {
t.Errorf("unexpected result: want %v, got %v", test.res, res)
}
}
}
type testClient struct {
*testing.T
ch chan apiClientTest
req *http.Request
}
type apiClientTest struct {
code int
response interface{}
expected string
err *Error
}
func (c *testClient) URL(ep string, args map[string]string) *url.URL {
return nil
}
func (c *testClient) Do(ctx context.Context, req *http.Request) (*http.Response, []byte, error) {
if ctx == nil {
c.Fatalf("context was not passed down")
}
if req != c.req {
c.Fatalf("request was not passed down")
}
test := <-c.ch
var b []byte
var err error
switch v := test.response.(type) {
case string:
b = []byte(v)
default:
b, err = json.Marshal(v)
if err != nil {
c.Fatal(err)
}
}
resp := &http.Response{
StatusCode: test.code,
}
return resp, b, nil
}
func TestAPIClientDo(t *testing.T) {
tests := []apiClientTest{
{
response: &apiResponse{
Status: "error",
Data: json.RawMessage(`null`),
ErrorType: ErrBadData,
Error: "failed",
},
err: &Error{
Type: ErrBadData,
Msg: "failed",
},
code: statusAPIError,
expected: `null`,
},
{
response: &apiResponse{
Status: "error",
Data: json.RawMessage(`"test"`),
ErrorType: ErrTimeout,
Error: "timed out",
},
err: &Error{
Type: ErrTimeout,
Msg: "timed out",
},
code: statusAPIError,
expected: `test`,
},
{
response: "bad json",
err: &Error{
Type: ErrBadResponse,
Msg: "bad response code 500",
},
code: http.StatusInternalServerError,
},
{
response: &apiResponse{
Status: "error",
Data: json.RawMessage(`null`),
ErrorType: ErrBadData,
Error: "end timestamp must not be before start time",
},
err: &Error{
Type: ErrBadData,
Msg: "end timestamp must not be before start time",
},
code: http.StatusBadRequest,
},
{
response: "bad json",
err: &Error{
Type: ErrBadResponse,
Msg: "invalid character 'b' looking for beginning of value",
},
code: statusAPIError,
},
{
response: &apiResponse{
Status: "success",
Data: json.RawMessage(`"test"`),
},
err: &Error{
Type: ErrBadResponse,
Msg: "inconsistent body for response code",
},
code: statusAPIError,
},
{
response: &apiResponse{
Status: "success",
Data: json.RawMessage(`"test"`),
ErrorType: ErrTimeout,
Error: "timed out",
},
err: &Error{
Type: ErrBadResponse,
Msg: "inconsistent body for response code",
},
code: statusAPIError,
},
{
response: &apiResponse{
Status: "error",
Data: json.RawMessage(`"test"`),
ErrorType: ErrTimeout,
Error: "timed out",
},
err: &Error{
Type: ErrBadResponse,
Msg: "inconsistent body for response code",
},
code: http.StatusOK,
},
}
tc := &testClient{
T: t,
ch: make(chan apiClientTest, 1),
req: &http.Request{},
}
client := &apiClient{tc}
for _, test := range tests {
tc.ch <- test
_, body, err := client.Do(context.Background(), tc.req)
if test.err != nil {
if err == nil {
t.Errorf("expected error %q but got none", test.err)
continue
}
if test.err.Error() != err.Error() {
t.Errorf("unexpected error: want %q, got %q", test.err, err)
}
continue
}
if err != nil {
t.Errorf("unexpeceted error %s", err)
continue
}
want, got := test.expected, string(body)
if want != got {
t.Errorf("unexpected body: want %q, got %q", want, got)
}
}
}

@ -18,19 +18,21 @@ package main
import ( import (
"flag" "flag"
"log"
"math" "math"
"math/rand" "math/rand"
"net/http" "net/http"
"time" "time"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promhttp"
) )
var ( var (
addr = flag.String("listen-address", ":8080", "The address to listen on for HTTP requests.") addr = flag.String("listen-address", ":8080", "The address to listen on for HTTP requests.")
uniformDomain = flag.Float64("uniform.domain", 200, "The domain for the uniform distribution.") uniformDomain = flag.Float64("uniform.domain", 0.0002, "The domain for the uniform distribution.")
normDomain = flag.Float64("normal.domain", 200, "The domain for the normal distribution.") normDomain = flag.Float64("normal.domain", 0.0002, "The domain for the normal distribution.")
normMean = flag.Float64("normal.mean", 10, "The mean for the normal distribution.") normMean = flag.Float64("normal.mean", 0.00001, "The mean for the normal distribution.")
oscillationPeriod = flag.Duration("oscillation-period", 10*time.Minute, "The duration of the rate oscillation period.") oscillationPeriod = flag.Duration("oscillation-period", 10*time.Minute, "The duration of the rate oscillation period.")
) )
@ -40,8 +42,9 @@ var (
// differentiated via a "service" label. // differentiated via a "service" label.
rpcDurations = prometheus.NewSummaryVec( rpcDurations = prometheus.NewSummaryVec(
prometheus.SummaryOpts{ prometheus.SummaryOpts{
Name: "rpc_durations_microseconds", Name: "rpc_durations_seconds",
Help: "RPC latency distributions.", Help: "RPC latency distributions.",
Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001},
}, },
[]string{"service"}, []string{"service"},
) )
@ -50,7 +53,7 @@ var (
// normal distribution, with 20 buckets centered on the mean, each // normal distribution, with 20 buckets centered on the mean, each
// half-sigma wide. // half-sigma wide.
rpcDurationsHistogram = prometheus.NewHistogram(prometheus.HistogramOpts{ rpcDurationsHistogram = prometheus.NewHistogram(prometheus.HistogramOpts{
Name: "rpc_durations_histogram_microseconds", Name: "rpc_durations_histogram_seconds",
Help: "RPC latency distributions.", Help: "RPC latency distributions.",
Buckets: prometheus.LinearBuckets(*normMean-5**normDomain, .5**normDomain, 20), Buckets: prometheus.LinearBuckets(*normMean-5**normDomain, .5**normDomain, 20),
}) })
@ -91,13 +94,13 @@ func main() {
go func() { go func() {
for { for {
v := rand.ExpFloat64() v := rand.ExpFloat64() / 1e6
rpcDurations.WithLabelValues("exponential").Observe(v) rpcDurations.WithLabelValues("exponential").Observe(v)
time.Sleep(time.Duration(50*oscillationFactor()) * time.Millisecond) time.Sleep(time.Duration(50*oscillationFactor()) * time.Millisecond)
} }
}() }()
// Expose the registered metrics via HTTP. // Expose the registered metrics via HTTP.
http.Handle("/metrics", prometheus.Handler()) http.Handle("/metrics", promhttp.Handler())
http.ListenAndServe(*addr, nil) log.Fatal(http.ListenAndServe(*addr, nil))
} }

@ -16,15 +16,16 @@ package main
import ( import (
"flag" "flag"
"log"
"net/http" "net/http"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promhttp"
) )
var addr = flag.String("listen-address", ":8080", "The address to listen on for HTTP requests.") var addr = flag.String("listen-address", ":8080", "The address to listen on for HTTP requests.")
func main() { func main() {
flag.Parse() flag.Parse()
http.Handle("/metrics", prometheus.Handler()) http.Handle("/metrics", promhttp.Handler())
http.ListenAndServe(*addr, nil) log.Fatal(http.ListenAndServe(*addr, nil))
} }

@ -129,8 +129,9 @@ func BenchmarkGaugeNoLabels(b *testing.B) {
func BenchmarkSummaryWithLabelValues(b *testing.B) { func BenchmarkSummaryWithLabelValues(b *testing.B) {
m := NewSummaryVec( m := NewSummaryVec(
SummaryOpts{ SummaryOpts{
Name: "benchmark_summary", Name: "benchmark_summary",
Help: "A summary to benchmark it.", Help: "A summary to benchmark it.",
Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001},
}, },
[]string{"one", "two", "three"}, []string{"one", "two", "three"},
) )
@ -143,8 +144,9 @@ func BenchmarkSummaryWithLabelValues(b *testing.B) {
func BenchmarkSummaryNoLabels(b *testing.B) { func BenchmarkSummaryNoLabels(b *testing.B) {
m := NewSummary(SummaryOpts{ m := NewSummary(SummaryOpts{
Name: "benchmark_summary", Name: "benchmark_summary",
Help: "A summary to benchmark it.", Help: "A summary to benchmark it.",
Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001},
}, },
) )
b.ReportAllocs() b.ReportAllocs()
@ -181,3 +183,17 @@ func BenchmarkHistogramNoLabels(b *testing.B) {
m.Observe(3.1415) m.Observe(3.1415)
} }
} }
func BenchmarkParallelCounter(b *testing.B) {
c := NewCounter(CounterOpts{
Name: "benchmark_counter",
Help: "A Counter to benchmark it.",
})
b.ReportAllocs()
b.ResetTimer()
b.RunParallel(func(pb *testing.PB) {
for pb.Next() {
c.Inc()
}
})
}

@ -29,27 +29,72 @@ type Collector interface {
// collected by this Collector to the provided channel and returns once // collected by this Collector to the provided channel and returns once
// the last descriptor has been sent. The sent descriptors fulfill the // the last descriptor has been sent. The sent descriptors fulfill the
// consistency and uniqueness requirements described in the Desc // consistency and uniqueness requirements described in the Desc
// documentation. (It is valid if one and the same Collector sends // documentation.
// duplicate descriptors. Those duplicates are simply ignored. However, //
// two different Collectors must not send duplicate descriptors.) This // It is valid if one and the same Collector sends duplicate
// method idempotently sends the same descriptors throughout the // descriptors. Those duplicates are simply ignored. However, two
// lifetime of the Collector. If a Collector encounters an error while // different Collectors must not send duplicate descriptors.
// executing this method, it must send an invalid descriptor (created //
// with NewInvalidDesc) to signal the error to the registry. // Sending no descriptor at all marks the Collector as “unchecked”,
// i.e. no checks will be performed at registration time, and the
// Collector may yield any Metric it sees fit in its Collect method.
//
// This method idempotently sends the same descriptors throughout the
// lifetime of the Collector. It may be called concurrently and
// therefore must be implemented in a concurrency safe way.
//
// If a Collector encounters an error while executing this method, it
// must send an invalid descriptor (created with NewInvalidDesc) to
// signal the error to the registry.
Describe(chan<- *Desc) Describe(chan<- *Desc)
// Collect is called by the Prometheus registry when collecting // Collect is called by the Prometheus registry when collecting
// metrics. The implementation sends each collected metric via the // metrics. The implementation sends each collected metric via the
// provided channel and returns once the last metric has been sent. The // provided channel and returns once the last metric has been sent. The
// descriptor of each sent metric is one of those returned by // descriptor of each sent metric is one of those returned by Describe
// Describe. Returned metrics that share the same descriptor must differ // (unless the Collector is unchecked, see above). Returned metrics that
// in their variable label values. This method may be called // share the same descriptor must differ in their variable label
// concurrently and must therefore be implemented in a concurrency safe // values.
// way. Blocking occurs at the expense of total performance of rendering //
// all registered metrics. Ideally, Collector implementations support // This method may be called concurrently and must therefore be
// concurrent readers. // implemented in a concurrency safe way. Blocking occurs at the expense
// of total performance of rendering all registered metrics. Ideally,
// Collector implementations support concurrent readers.
Collect(chan<- Metric) Collect(chan<- Metric)
} }
// DescribeByCollect is a helper to implement the Describe method of a custom
// Collector. It collects the metrics from the provided Collector and sends
// their descriptors to the provided channel.
//
// If a Collector collects the same metrics throughout its lifetime, its
// Describe method can simply be implemented as:
//
// func (c customCollector) Describe(ch chan<- *Desc) {
// DescribeByCollect(c, ch)
// }
//
// However, this will not work if the metrics collected change dynamically over
// the lifetime of the Collector in a way that their combined set of descriptors
// changes as well. The shortcut implementation will then violate the contract
// of the Describe method. If a Collector sometimes collects no metrics at all
// (for example vectors like CounterVec, GaugeVec, etc., which only collect
// metrics after a metric with a fully specified label set has been accessed),
// it might even get registered as an unchecked Collector (cf. the Register
// method of the Registerer interface). Hence, only use this shortcut
// implementation of Describe if you are certain to fulfill the contract.
//
// The Collector example demonstrates a use of DescribeByCollect.
func DescribeByCollect(c Collector, descs chan<- *Desc) {
metrics := make(chan Metric)
go func() {
c.Collect(metrics)
close(metrics)
}()
for m := range metrics {
descs <- m.Desc()
}
}
// selfCollector implements Collector for a single Metric so that the Metric // selfCollector implements Collector for a single Metric so that the Metric
// collects itself. Add it as an anonymous field to a struct that implements // collects itself. Add it as an anonymous field to a struct that implements
// Metric, and call init with the Metric itself as an argument. // Metric, and call init with the Metric itself as an argument.

@ -0,0 +1,62 @@
// Copyright 2018 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package prometheus
import "testing"
type collectorDescribedByCollect struct {
cnt Counter
gge Gauge
}
func (c collectorDescribedByCollect) Collect(ch chan<- Metric) {
ch <- c.cnt
ch <- c.gge
}
func (c collectorDescribedByCollect) Describe(ch chan<- *Desc) {
DescribeByCollect(c, ch)
}
func TestDescribeByCollect(t *testing.T) {
goodCollector := collectorDescribedByCollect{
cnt: NewCounter(CounterOpts{Name: "c1", Help: "help c1"}),
gge: NewGauge(GaugeOpts{Name: "g1", Help: "help g1"}),
}
collidingCollector := collectorDescribedByCollect{
cnt: NewCounter(CounterOpts{Name: "c2", Help: "help c2"}),
gge: NewGauge(GaugeOpts{Name: "g1", Help: "help g1"}),
}
inconsistentCollector := collectorDescribedByCollect{
cnt: NewCounter(CounterOpts{Name: "c3", Help: "help c3"}),
gge: NewGauge(GaugeOpts{Name: "c3", Help: "help inconsistent"}),
}
reg := NewPedanticRegistry()
if err := reg.Register(goodCollector); err != nil {
t.Error("registration failed:", err)
}
if err := reg.Register(collidingCollector); err == nil {
t.Error("registration unexpectedly succeeded")
}
if err := reg.Register(inconsistentCollector); err == nil {
t.Error("registration unexpectedly succeeded")
}
if _, err := reg.Gather(); err != nil {
t.Error("gathering failed:", err)
}
}

@ -15,6 +15,10 @@ package prometheus
import ( import (
"errors" "errors"
"math"
"sync/atomic"
dto "github.com/prometheus/client_model/go"
) )
// Counter is a Metric that represents a single numerical value that only ever // Counter is a Metric that represents a single numerical value that only ever
@ -30,16 +34,8 @@ type Counter interface {
Metric Metric
Collector Collector
// Set is used to set the Counter to an arbitrary value. It is only used // Inc increments the counter by 1. Use Add to increment it by arbitrary
// if you have to transfer a value from an external counter into this // non-negative values.
// Prometheus metric. Do not use it for regular handling of a
// Prometheus counter (as it can be used to break the contract of
// monotonically increasing values).
//
// Deprecated: Use NewConstMetric to create a counter for an external
// value. A Counter should never be set.
Set(float64)
// Inc increments the counter by 1.
Inc() Inc()
// Add adds the given value to the counter. It panics if the value is < // Add adds the given value to the counter. It panics if the value is <
// 0. // 0.
@ -50,6 +46,14 @@ type Counter interface {
type CounterOpts Opts type CounterOpts Opts
// NewCounter creates a new Counter based on the provided CounterOpts. // NewCounter creates a new Counter based on the provided CounterOpts.
//
// The returned implementation tracks the counter value in two separate
// variables, a float64 and a uint64. The latter is used to track calls of the
// Inc method and calls of the Add method with a value that can be represented
// as a uint64. This allows atomic increments of the counter with optimal
// performance. (It is common to have an Inc call in very hot execution paths.)
// Both internal tracking values are added up in the Write method. This has to
// be taken into account when it comes to precision and overflow behavior.
func NewCounter(opts CounterOpts) Counter { func NewCounter(opts CounterOpts) Counter {
desc := NewDesc( desc := NewDesc(
BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
@ -57,20 +61,58 @@ func NewCounter(opts CounterOpts) Counter {
nil, nil,
opts.ConstLabels, opts.ConstLabels,
) )
result := &counter{value: value{desc: desc, valType: CounterValue, labelPairs: desc.constLabelPairs}} result := &counter{desc: desc, labelPairs: desc.constLabelPairs}
result.init(result) // Init self-collection. result.init(result) // Init self-collection.
return result return result
} }
type counter struct { type counter struct {
value // valBits contains the bits of the represented float64 value, while
// valInt stores values that are exact integers. Both have to go first
// in the struct to guarantee alignment for atomic operations.
// http://golang.org/pkg/sync/atomic/#pkg-note-BUG
valBits uint64
valInt uint64
selfCollector
desc *Desc
labelPairs []*dto.LabelPair
}
func (c *counter) Desc() *Desc {
return c.desc
} }
func (c *counter) Add(v float64) { func (c *counter) Add(v float64) {
if v < 0 { if v < 0 {
panic(errors.New("counter cannot decrease in value")) panic(errors.New("counter cannot decrease in value"))
} }
c.value.Add(v) ival := uint64(v)
if float64(ival) == v {
atomic.AddUint64(&c.valInt, ival)
return
}
for {
oldBits := atomic.LoadUint64(&c.valBits)
newBits := math.Float64bits(math.Float64frombits(oldBits) + v)
if atomic.CompareAndSwapUint64(&c.valBits, oldBits, newBits) {
return
}
}
}
func (c *counter) Inc() {
atomic.AddUint64(&c.valInt, 1)
}
func (c *counter) Write(out *dto.Metric) error {
fval := math.Float64frombits(atomic.LoadUint64(&c.valBits))
ival := atomic.LoadUint64(&c.valInt)
val := fval + float64(ival)
return populateMetric(CounterValue, val, c.labelPairs, out)
} }
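// The float64 path in Add relies on the usual compare-and-swap pattern for
// atomically accumulating a float64 stored as raw uint64 bits; a minimal
// standalone sketch of the same idea:
//
//	var bits uint64 // float64 value stored via math.Float64bits
//	add := func(v float64) {
//		for {
//			oldBits := atomic.LoadUint64(&bits)
//			newBits := math.Float64bits(math.Float64frombits(oldBits) + v)
//			if atomic.CompareAndSwapUint64(&bits, oldBits, newBits) {
//				return
//			}
//		}
//	}
//	add(1.5)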
// CounterVec is a Collector that bundles a set of Counters that all share the // CounterVec is a Collector that bundles a set of Counters that all share the
@ -78,16 +120,12 @@ func (c *counter) Add(v float64) {
// if you want to count the same thing partitioned by various dimensions // if you want to count the same thing partitioned by various dimensions
// (e.g. number of HTTP requests, partitioned by response code and // (e.g. number of HTTP requests, partitioned by response code and
// method). Create instances with NewCounterVec. // method). Create instances with NewCounterVec.
//
// CounterVec embeds MetricVec. See there for a full list of methods with
// detailed documentation.
type CounterVec struct { type CounterVec struct {
*MetricVec *metricVec
} }
// NewCounterVec creates a new CounterVec based on the provided CounterOpts and // NewCounterVec creates a new CounterVec based on the provided CounterOpts and
// partitioned by the given label names. At least one label name must be // partitioned by the given label names.
// provided.
func NewCounterVec(opts CounterOpts, labelNames []string) *CounterVec { func NewCounterVec(opts CounterOpts, labelNames []string) *CounterVec {
desc := NewDesc( desc := NewDesc(
BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
@ -96,34 +134,62 @@ func NewCounterVec(opts CounterOpts, labelNames []string) *CounterVec {
opts.ConstLabels, opts.ConstLabels,
) )
return &CounterVec{ return &CounterVec{
MetricVec: newMetricVec(desc, func(lvs ...string) Metric { metricVec: newMetricVec(desc, func(lvs ...string) Metric {
result := &counter{value: value{ if len(lvs) != len(desc.variableLabels) {
desc: desc, panic(errInconsistentCardinality)
valType: CounterValue, }
labelPairs: makeLabelPairs(desc, lvs), result := &counter{desc: desc, labelPairs: makeLabelPairs(desc, lvs)}
}}
result.init(result) // Init self-collection. result.init(result) // Init self-collection.
return result return result
}), }),
} }
} }
// GetMetricWithLabelValues replaces the method of the same name in // GetMetricWithLabelValues returns the Counter for the given slice of label
// MetricVec. The difference is that this method returns a Counter and not a // values (same order as the VariableLabels in Desc). If that combination of
// Metric so that no type conversion is required. // label values is accessed for the first time, a new Counter is created.
func (m *CounterVec) GetMetricWithLabelValues(lvs ...string) (Counter, error) { //
metric, err := m.MetricVec.GetMetricWithLabelValues(lvs...) // It is possible to call this method without using the returned Counter to only
// create the new Counter but leave it at its starting value 0. See also the
// SummaryVec example.
//
// Keeping the Counter for later use is possible (and should be considered if
// performance is critical), but keep in mind that Reset, DeleteLabelValues and
// Delete can be used to delete the Counter from the CounterVec. In that case,
// the Counter will still exist, but it will not be exported anymore, even if a
// Counter with the same label values is created later.
//
// An error is returned if the number of label values is not the same as the
// number of VariableLabels in Desc (minus any curried labels).
//
// Note that for more than one label value, this method is prone to mistakes
// caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as
// an alternative to avoid that type of mistake. For higher label numbers, the
// latter has a much more readable (albeit more verbose) syntax, but it comes
// with a performance overhead (for creating and processing the Labels map).
// See also the GaugeVec example.
func (v *CounterVec) GetMetricWithLabelValues(lvs ...string) (Counter, error) {
metric, err := v.metricVec.getMetricWithLabelValues(lvs...)
if metric != nil { if metric != nil {
return metric.(Counter), err return metric.(Counter), err
} }
return nil, err return nil, err
} }
// GetMetricWith replaces the method of the same name in MetricVec. The // GetMetricWith returns the Counter for the given Labels map (the label names
// difference is that this method returns a Counter and not a Metric so that no // must match those of the VariableLabels in Desc). If that label map is
// type conversion is required. // accessed for the first time, a new Counter is created. Implications of
func (m *CounterVec) GetMetricWith(labels Labels) (Counter, error) { // creating a Counter without using it and keeping the Counter for later use are
metric, err := m.MetricVec.GetMetricWith(labels) // the same as for GetMetricWithLabelValues.
//
// An error is returned if the number and names of the Labels are inconsistent
// with those of the VariableLabels in Desc (minus any curried labels).
//
// This method is used for the same purpose as
// GetMetricWithLabelValues(...string). See there for pros and cons of the two
// methods.
func (v *CounterVec) GetMetricWith(labels Labels) (Counter, error) {
metric, err := v.metricVec.getMetricWith(labels)
if metric != nil { if metric != nil {
return metric.(Counter), err return metric.(Counter), err
} }
@ -131,18 +197,57 @@ func (m *CounterVec) GetMetricWith(labels Labels) (Counter, error) {
} }
// WithLabelValues works as GetMetricWithLabelValues, but panics where // WithLabelValues works as GetMetricWithLabelValues, but panics where
// GetMetricWithLabelValues would have returned an error. By not returning an // GetMetricWithLabelValues would have returned an error. Not returning an
// error, WithLabelValues allows shortcuts like // error allows shortcuts like
// myVec.WithLabelValues("404", "GET").Add(42) // myVec.WithLabelValues("404", "GET").Add(42)
func (m *CounterVec) WithLabelValues(lvs ...string) Counter { func (v *CounterVec) WithLabelValues(lvs ...string) Counter {
return m.MetricVec.WithLabelValues(lvs...).(Counter) c, err := v.GetMetricWithLabelValues(lvs...)
if err != nil {
panic(err)
}
return c
} }
// With works as GetMetricWith, but panics where GetMetricWithLabels would have // With works as GetMetricWith, but panics where GetMetricWithLabels would have
// returned an error. By not returning an error, With allows shortcuts like // returned an error. Not returning an error allows shortcuts like
// myVec.With(Labels{"code": "404", "method": "GET"}).Add(42) // myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Add(42)
func (m *CounterVec) With(labels Labels) Counter { func (v *CounterVec) With(labels Labels) Counter {
return m.MetricVec.With(labels).(Counter) c, err := v.GetMetricWith(labels)
if err != nil {
panic(err)
}
return c
}
// CurryWith returns a vector curried with the provided labels, i.e. the
// returned vector has those labels pre-set for all labeled operations performed
// on it. The cardinality of the curried vector is reduced accordingly. The
// order of the remaining labels stays the same (just with the curried labels
// taken out of the sequence which is relevant for the
// (GetMetric)WithLabelValues methods). It is possible to curry a curried
// vector, but only with labels not yet used for currying before.
//
// The metrics contained in the CounterVec are shared between the curried and
// uncurried vectors. They are just accessed differently. Curried and uncurried
// vectors behave identically in terms of collection. Only one must be
// registered with a given registry (usually the uncurried version). The Reset
// method deletes all metrics, even if called on a curried vector.
func (v *CounterVec) CurryWith(labels Labels) (*CounterVec, error) {
vec, err := v.curryWith(labels)
if vec != nil {
return &CounterVec{vec}, err
}
return nil, err
}
// MustCurryWith works as CurryWith but panics where CurryWith would have
// returned an error.
func (v *CounterVec) MustCurryWith(labels Labels) *CounterVec {
vec, err := v.CurryWith(labels)
if err != nil {
panic(err)
}
return vec
} }
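The CurryWith and MustCurryWith comments above describe pre-setting a subset of labels on a vector. A short sketch of that pattern with an invented vector; only the uncurried vector is registered, as the comment recommends:

package main

import "github.com/prometheus/client_golang/prometheus"

func main() {
    reqs := prometheus.NewCounterVec(
        prometheus.CounterOpts{
            Name: "example_requests_total",
            Help: "Requests, partitioned by handler and status code.",
        },
        []string{"handler", "code"},
    )
    prometheus.MustRegister(reqs) // register only the uncurried vector

    // Curry the handler label once; the curried vector only needs "code".
    apiReqs := reqs.MustCurryWith(prometheus.Labels{"handler": "/api"})
    apiReqs.WithLabelValues("200").Inc()
    apiReqs.WithLabelValues("500").Inc()

    // The same series remains reachable through the uncurried vector.
    reqs.WithLabelValues("/api", "200").Inc()
}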
// CounterFunc is a Counter whose value is determined at collect time by calling a // CounterFunc is a Counter whose value is determined at collect time by calling a

@ -14,6 +14,7 @@
package prometheus package prometheus
import ( import (
"fmt"
"math" "math"
"testing" "testing"
@ -27,13 +28,27 @@ func TestCounterAdd(t *testing.T) {
ConstLabels: Labels{"a": "1", "b": "2"}, ConstLabels: Labels{"a": "1", "b": "2"},
}).(*counter) }).(*counter)
counter.Inc() counter.Inc()
if expected, got := 1., math.Float64frombits(counter.valBits); expected != got { if expected, got := 0.0, math.Float64frombits(counter.valBits); expected != got {
t.Errorf("Expected %f, got %f.", expected, got) t.Errorf("Expected %f, got %f.", expected, got)
} }
if expected, got := uint64(1), counter.valInt; expected != got {
t.Errorf("Expected %d, got %d.", expected, got)
}
counter.Add(42) counter.Add(42)
if expected, got := 43., math.Float64frombits(counter.valBits); expected != got { if expected, got := 0.0, math.Float64frombits(counter.valBits); expected != got {
t.Errorf("Expected %f, got %f.", expected, got) t.Errorf("Expected %f, got %f.", expected, got)
} }
if expected, got := uint64(43), counter.valInt; expected != got {
t.Errorf("Expected %d, got %d.", expected, got)
}
counter.Add(24.42)
if expected, got := 24.42, math.Float64frombits(counter.valBits); expected != got {
t.Errorf("Expected %f, got %f.", expected, got)
}
if expected, got := uint64(43), counter.valInt; expected != got {
t.Errorf("Expected %d, got %d.", expected, got)
}
if expected, got := "counter cannot decrease in value", decreaseCounter(counter).Error(); expected != got { if expected, got := "counter cannot decrease in value", decreaseCounter(counter).Error(); expected != got {
t.Errorf("Expected error %q, got %q.", expected, got) t.Errorf("Expected error %q, got %q.", expected, got)
@ -42,7 +57,7 @@ func TestCounterAdd(t *testing.T) {
m := &dto.Metric{} m := &dto.Metric{}
counter.Write(m) counter.Write(m)
if expected, got := `label:<name:"a" value:"1" > label:<name:"b" value:"2" > counter:<value:43 > `, m.String(); expected != got { if expected, got := `label:<name:"a" value:"1" > label:<name:"b" value:"2" > counter:<value:67.42 > `, m.String(); expected != got {
t.Errorf("expected %q, got %q", expected, got) t.Errorf("expected %q, got %q", expected, got)
} }
} }
@ -56,3 +71,142 @@ func decreaseCounter(c *counter) (err error) {
c.Add(-1) c.Add(-1)
return nil return nil
} }
func TestCounterVecGetMetricWithInvalidLabelValues(t *testing.T) {
testCases := []struct {
desc string
labels Labels
}{
{
desc: "non utf8 label value",
labels: Labels{"a": "\xFF"},
},
{
desc: "not enough label values",
labels: Labels{},
},
{
desc: "too many label values",
labels: Labels{"a": "1", "b": "2"},
},
}
for _, test := range testCases {
counterVec := NewCounterVec(CounterOpts{
Name: "test",
}, []string{"a"})
labelValues := make([]string, 0, len(test.labels))
for _, val := range test.labels {
labelValues = append(labelValues, val)
}
expectPanic(t, func() {
counterVec.WithLabelValues(labelValues...)
}, fmt.Sprintf("WithLabelValues: expected panic because: %s", test.desc))
expectPanic(t, func() {
counterVec.With(test.labels)
}, fmt.Sprintf("WithLabelValues: expected panic because: %s", test.desc))
if _, err := counterVec.GetMetricWithLabelValues(labelValues...); err == nil {
t.Errorf("GetMetricWithLabelValues: expected error because: %s", test.desc)
}
if _, err := counterVec.GetMetricWith(test.labels); err == nil {
t.Errorf("GetMetricWith: expected error because: %s", test.desc)
}
}
}
func expectPanic(t *testing.T, op func(), errorMsg string) {
defer func() {
if err := recover(); err == nil {
t.Error(errorMsg)
}
}()
op()
}
func TestCounterAddInf(t *testing.T) {
counter := NewCounter(CounterOpts{
Name: "test",
Help: "test help",
}).(*counter)
counter.Inc()
if expected, got := 0.0, math.Float64frombits(counter.valBits); expected != got {
t.Errorf("Expected %f, got %f.", expected, got)
}
if expected, got := uint64(1), counter.valInt; expected != got {
t.Errorf("Expected %d, got %d.", expected, got)
}
counter.Add(math.Inf(1))
if expected, got := math.Inf(1), math.Float64frombits(counter.valBits); expected != got {
t.Errorf("valBits expected %f, got %f.", expected, got)
}
if expected, got := uint64(1), counter.valInt; expected != got {
t.Errorf("valInts expected %d, got %d.", expected, got)
}
counter.Inc()
if expected, got := math.Inf(1), math.Float64frombits(counter.valBits); expected != got {
t.Errorf("Expected %f, got %f.", expected, got)
}
if expected, got := uint64(2), counter.valInt; expected != got {
t.Errorf("Expected %d, got %d.", expected, got)
}
m := &dto.Metric{}
counter.Write(m)
if expected, got := `counter:<value:inf > `, m.String(); expected != got {
t.Errorf("expected %q, got %q", expected, got)
}
}
func TestCounterAddLarge(t *testing.T) {
counter := NewCounter(CounterOpts{
Name: "test",
Help: "test help",
}).(*counter)
// large overflows the underlying type and should therefore be stored in valBits.
large := float64(math.MaxUint64 + 1)
counter.Add(large)
if expected, got := large, math.Float64frombits(counter.valBits); expected != got {
t.Errorf("valBits expected %f, got %f.", expected, got)
}
if expected, got := uint64(0), counter.valInt; expected != got {
t.Errorf("valInts expected %d, got %d.", expected, got)
}
m := &dto.Metric{}
counter.Write(m)
if expected, got := fmt.Sprintf("counter:<value:%0.16e > ", large), m.String(); expected != got {
t.Errorf("expected %q, got %q", expected, got)
}
}
func TestCounterAddSmall(t *testing.T) {
counter := NewCounter(CounterOpts{
Name: "test",
Help: "test help",
}).(*counter)
small := 0.000000000001
counter.Add(small)
if expected, got := small, math.Float64frombits(counter.valBits); expected != got {
t.Errorf("valBits expected %f, got %f.", expected, got)
}
if expected, got := uint64(0), counter.valInt; expected != got {
t.Errorf("valInts expected %d, got %d.", expected, got)
}
m := &dto.Metric{}
counter.Write(m)
if expected, got := fmt.Sprintf("counter:<value:%0.0e > ", small), m.String(); expected != got {
t.Errorf("expected %q, got %q", expected, got)
}
}
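The new assertions above read two fields, valInt and valBits. They are consistent with a counter that keeps whole-number increments in an integer and routes fractional, huge, or infinite additions through a float, summing both parts when the metric is written. A rough sketch of that idea under those assumptions (illustrative names, not the package's actual implementation):

package main

import (
    "fmt"
    "math"
    "sync/atomic"
)

// splitCounter illustrates the dual-storage scheme the tests above suggest.
type splitCounter struct {
    valBits uint64 // bits of a float64; kept first for 64-bit atomic alignment
    valInt  uint64 // accumulated whole-number increments
}

func (c *splitCounter) Add(v float64) {
    if v < 0 {
        panic("counter cannot decrease in value")
    }
    // Whole numbers that safely fit in a uint64 take the cheap integer path.
    if v == math.Trunc(v) && v < (1 << 63) {
        atomic.AddUint64(&c.valInt, uint64(v))
        return
    }
    // Everything else (fractions, +Inf, very large values) goes through a
    // compare-and-swap loop on the float64 bits.
    for {
        oldBits := atomic.LoadUint64(&c.valBits)
        newBits := math.Float64bits(math.Float64frombits(oldBits) + v)
        if atomic.CompareAndSwapUint64(&c.valBits, oldBits, newBits) {
            return
        }
    }
}

// value reports the sum of both parts, which is what a Write would export.
func (c *splitCounter) value() float64 {
    return math.Float64frombits(atomic.LoadUint64(&c.valBits)) +
        float64(atomic.LoadUint64(&c.valInt))
}

func main() {
    c := &splitCounter{}
    c.Add(1)
    c.Add(42)
    c.Add(24.42)
    fmt.Println(c.value()) // ≈ 67.42, matching the expectation in TestCounterAdd
}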

@ -16,33 +16,15 @@ package prometheus
import ( import (
"errors" "errors"
"fmt" "fmt"
"regexp"
"sort" "sort"
"strings" "strings"
"github.com/golang/protobuf/proto" "github.com/golang/protobuf/proto"
"github.com/prometheus/common/model"
dto "github.com/prometheus/client_model/go" dto "github.com/prometheus/client_model/go"
) )
var (
metricNameRE = regexp.MustCompile(`^[a-zA-Z_][a-zA-Z0-9_:]*$`)
labelNameRE = regexp.MustCompile("^[a-zA-Z_][a-zA-Z0-9_]*$")
)
// reservedLabelPrefix is a prefix which is not legal in user-supplied
// label names.
const reservedLabelPrefix = "__"
// Labels represents a collection of label name -> value mappings. This type is
// commonly used with the With(Labels) and GetMetricWith(Labels) methods of
// metric vector Collectors, e.g.:
// myVec.With(Labels{"code": "404", "method": "GET"}).Add(42)
//
// The other use-case is the specification of constant label pairs in Opts or to
// create a Desc.
type Labels map[string]string
// Desc is the descriptor used by every Prometheus Metric. It is essentially // Desc is the descriptor used by every Prometheus Metric. It is essentially
// the immutable meta-data of a Metric. The normal Metric implementations // the immutable meta-data of a Metric. The normal Metric implementations
// included in this package manage their Desc under the hood. Users only have to // included in this package manage their Desc under the hood. Users only have to
@ -78,32 +60,27 @@ type Desc struct {
// Help string. Each Desc with the same fqName must have the same // Help string. Each Desc with the same fqName must have the same
// dimHash. // dimHash.
dimHash uint64 dimHash uint64
// err is an error that occured during construction. It is reported on // err is an error that occurred during construction. It is reported on
// registration time. // registration time.
err error err error
} }
// NewDesc allocates and initializes a new Desc. Errors are recorded in the Desc // NewDesc allocates and initializes a new Desc. Errors are recorded in the Desc
// and will be reported on registration time. variableLabels and constLabels can // and will be reported on registration time. variableLabels and constLabels can
// be nil if no such labels should be set. fqName and help must not be empty. // be nil if no such labels should be set. fqName must not be empty.
// //
// variableLabels only contain the label names. Their label values are variable // variableLabels only contain the label names. Their label values are variable
// and therefore not part of the Desc. (They are managed within the Metric.) // and therefore not part of the Desc. (They are managed within the Metric.)
// //
// For constLabels, the label values are constant. Therefore, they are fully // For constLabels, the label values are constant. Therefore, they are fully
// specified in the Desc. See the Opts documentation for the implications of // specified in the Desc. See the Collector example for a usage pattern.
// constant labels.
func NewDesc(fqName, help string, variableLabels []string, constLabels Labels) *Desc { func NewDesc(fqName, help string, variableLabels []string, constLabels Labels) *Desc {
d := &Desc{ d := &Desc{
fqName: fqName, fqName: fqName,
help: help, help: help,
variableLabels: variableLabels, variableLabels: variableLabels,
} }
if help == "" { if !model.IsValidMetricName(model.LabelValue(fqName)) {
d.err = errors.New("empty help string")
return d
}
if !metricNameRE.MatchString(fqName) {
d.err = fmt.Errorf("%q is not a valid metric name", fqName) d.err = fmt.Errorf("%q is not a valid metric name", fqName)
return d return d
} }
@ -127,6 +104,12 @@ func NewDesc(fqName, help string, variableLabels []string, constLabels Labels) *
for _, labelName := range labelNames { for _, labelName := range labelNames {
labelValues = append(labelValues, constLabels[labelName]) labelValues = append(labelValues, constLabels[labelName])
} }
// Validate the const label values. They can't have a wrong cardinality, so
// use len(labelValues) as expectedNumberOfValues.
if err := validateLabelValues(labelValues, len(labelValues)); err != nil {
d.err = err
return d
}
// Now add the variable label names, but prefix them with something that // Now add the variable label names, but prefix them with something that
// cannot be in a regular label name. That prevents matching the label // cannot be in a regular label name. That prevents matching the label
// dimension with a different mix between preset and variable labels. // dimension with a different mix between preset and variable labels.
@ -142,6 +125,7 @@ func NewDesc(fqName, help string, variableLabels []string, constLabels Labels) *
d.err = errors.New("duplicate label names") d.err = errors.New("duplicate label names")
return d return d
} }
vh := hashNew() vh := hashNew()
for _, val := range labelValues { for _, val := range labelValues {
vh = hashAdd(vh, val) vh = hashAdd(vh, val)
@ -168,7 +152,7 @@ func NewDesc(fqName, help string, variableLabels []string, constLabels Labels) *
Value: proto.String(v), Value: proto.String(v),
}) })
} }
sort.Sort(LabelPairSorter(d.constLabelPairs)) sort.Sort(labelPairSorter(d.constLabelPairs))
return d return d
} }
@ -198,8 +182,3 @@ func (d *Desc) String() string {
d.variableLabels, d.variableLabels,
) )
} }
func checkLabelName(l string) bool {
return labelNameRE.MatchString(l) &&
!strings.HasPrefix(l, reservedLabelPrefix)
}

@ -0,0 +1,30 @@
// Copyright 2018 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package prometheus
import (
"testing"
)
func TestNewDescInvalidLabelValues(t *testing.T) {
desc := NewDesc(
"sample_label",
"sample label",
nil,
Labels{"a": "\xFF"},
)
if desc.err == nil {
t.Errorf("NewDesc: expected error because: %s", desc.err)
}
}
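The test above only asserts that desc.err is set; as the NewDesc comment in this diff says, such errors are reported at registration time. A small sketch of how that surfaces through the public API (the counter name is invented):

package main

import (
    "fmt"

    "github.com/prometheus/client_golang/prometheus"
)

func main() {
    // The invalid (non-UTF-8) const label value is recorded in the Desc...
    c := prometheus.NewCounter(prometheus.CounterOpts{
        Name:        "example_invalid_total",
        Help:        "Counter with an invalid const label value.",
        ConstLabels: prometheus.Labels{"a": "\xFF"},
    })

    // ...and only reported once the collector reaches a registry.
    if err := prometheus.Register(c); err != nil {
        fmt.Println("registration failed:", err)
    }
}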

@ -11,13 +11,15 @@
// See the License for the specific language governing permissions and // See the License for the specific language governing permissions and
// limitations under the License. // limitations under the License.
// Package prometheus provides metrics primitives to instrument code for // Package prometheus is the core instrumentation package. It provides metrics
// monitoring. It also offers a registry for metrics. Sub-packages allow to // primitives to instrument code for monitoring. It also offers a registry for
// expose the registered metrics via HTTP (package promhttp) or push them to a // metrics. Sub-packages allow to expose the registered metrics via HTTP
// Pushgateway (package push). // (package promhttp) or push them to a Pushgateway (package push). There is
// also a sub-package promauto, which provides metrics constructors with
// automatic registration.
// //
// All exported functions and methods are safe to be used concurrently unless // All exported functions and methods are safe to be used concurrently unless
//specified otherwise. // specified otherwise.
// //
// A Basic Example // A Basic Example
// //
@ -26,6 +28,7 @@
// package main // package main
// //
// import ( // import (
// "log"
// "net/http" // "net/http"
// //
// "github.com/prometheus/client_golang/prometheus" // "github.com/prometheus/client_golang/prometheus"
@ -59,7 +62,7 @@
// // The Handler function provides a default handler to expose metrics // // The Handler function provides a default handler to expose metrics
// // via an HTTP server. "/metrics" is the usual endpoint for that. // // via an HTTP server. "/metrics" is the usual endpoint for that.
// http.Handle("/metrics", promhttp.Handler()) // http.Handle("/metrics", promhttp.Handler())
// http.ListenAndServe(":8080", nil) // log.Fatal(http.ListenAndServe(":8080", nil))
// } // }
// //
// //
@ -69,9 +72,12 @@
// Metrics // Metrics
// //
// The number of exported identifiers in this package might appear a bit // The number of exported identifiers in this package might appear a bit
// overwhelming. Hovever, in addition to the basic plumbing shown in the example // overwhelming. However, in addition to the basic plumbing shown in the example
// above, you only need to understand the different metric types and their // above, you only need to understand the different metric types and their
// vector versions for basic usage. // vector versions for basic usage. Furthermore, if you are not concerned with
// fine-grained control of when and how to register metrics with the registry,
// have a look at the promauto package, which will effectively allow you to
// ignore registration altogether in simple cases.
// //
// Above, you have already touched the Counter and the Gauge. There are two more // Above, you have already touched the Counter and the Gauge. There are two more
// advanced metric types: the Summary and Histogram. A more thorough description // advanced metric types: the Summary and Histogram. A more thorough description
@ -95,8 +101,8 @@
// SummaryVec, HistogramVec, and UntypedVec are not. // SummaryVec, HistogramVec, and UntypedVec are not.
// //
// To create instances of Metrics and their vector versions, you need a suitable // To create instances of Metrics and their vector versions, you need a suitable
// …Opts struct, i.e. GaugeOpts, CounterOpts, SummaryOpts, // …Opts struct, i.e. GaugeOpts, CounterOpts, SummaryOpts, HistogramOpts, or
// HistogramOpts, or UntypedOpts. // UntypedOpts.
// //
// Custom Collectors and constant Metrics // Custom Collectors and constant Metrics
// //
@ -114,8 +120,18 @@
// Metric instances “on the fly” using NewConstMetric, NewConstHistogram, and // Metric instances “on the fly” using NewConstMetric, NewConstHistogram, and
// NewConstSummary (and their respective Must… versions). That will happen in // NewConstSummary (and their respective Must… versions). That will happen in
// the Collect method. The Describe method has to return separate Desc // the Collect method. The Describe method has to return separate Desc
// instances, representative of the “throw-away” metrics to be created // instances, representative of the “throw-away” metrics to be created later.
// later. NewDesc comes in handy to create those Desc instances. // NewDesc comes in handy to create those Desc instances. Alternatively, you
// could return no Desc at all, which will mark the Collector “unchecked”. No
// checks are performed at registration time, but metric consistency will still
// be ensured at scrape time, i.e. any inconsistencies will lead to scrape
// errors. Thus, with unchecked Collectors, the responsibility to not collect
// metrics that lead to inconsistencies in the total scrape result lies with the
// implementer of the Collector. While this is not a desirable state, it is
// sometimes necessary. The typical use case is a situation where the exact
// metrics to be returned by a Collector cannot be predicted at registration
// time, but the implementer has sufficient knowledge of the whole system to
// guarantee metric consistency.
// //
// The Collector example illustrates the use case. You can also look at the // The Collector example illustrates the use case. You can also look at the
// source code of the processCollector (mirroring process metrics), the // source code of the processCollector (mirroring process metrics), the
@ -129,34 +145,34 @@
// Advanced Uses of the Registry // Advanced Uses of the Registry
// //
// While MustRegister is the by far most common way of registering a Collector, // While MustRegister is the by far most common way of registering a Collector,
// sometimes you might want to handle the errors the registration might // sometimes you might want to handle the errors the registration might cause.
// cause. As suggested by the name, MustRegister panics if an error occurs. With // As suggested by the name, MustRegister panics if an error occurs. With the
// the Register function, the error is returned and can be handled. // Register function, the error is returned and can be handled.
// //
// An error is returned if the registered Collector is incompatible or // An error is returned if the registered Collector is incompatible or
// inconsistent with already registered metrics. The registry aims for // inconsistent with already registered metrics. The registry aims for
// consistency of the collected metrics according to the Prometheus data // consistency of the collected metrics according to the Prometheus data model.
// model. Inconsistencies are ideally detected at registration time, not at // Inconsistencies are ideally detected at registration time, not at collect
// collect time. The former will usually be detected at start-up time of a // time. The former will usually be detected at start-up time of a program,
// program, while the latter will only happen at scrape time, possibly not even // while the latter will only happen at scrape time, possibly not even on the
// on the first scrape if the inconsistency only becomes relevant later. That is // first scrape if the inconsistency only becomes relevant later. That is the
// the main reason why a Collector and a Metric have to describe themselves to // main reason why a Collector and a Metric have to describe themselves to the
// the registry. // registry.
// //
// So far, everything we did operated on the so-called default registry, as it // So far, everything we did operated on the so-called default registry, as it
// can be found in the global DefaultRegistry variable. With NewRegistry, you // can be found in the global DefaultRegisterer variable. With NewRegistry, you
// can create a custom registry, or you can even implement the Registerer or // can create a custom registry, or you can even implement the Registerer or
// Gatherer interfaces yourself. The methods Register and Unregister work in // Gatherer interfaces yourself. The methods Register and Unregister work in the
// the same way on a custom registry as the global functions Register and // same way on a custom registry as the global functions Register and Unregister
// Unregister on the default registry. // on the default registry.
// //
// There are a number of uses for custom registries: You can use registries // There are a number of uses for custom registries: You can use registries with
// with special properties, see NewPedanticRegistry. You can avoid global state, // special properties, see NewPedanticRegistry. You can avoid global state, as
// as it is imposed by the DefaultRegistry. You can use multiple registries at // it is imposed by the DefaultRegisterer. You can use multiple registries at
// the same time to expose different metrics in different ways. You can use // the same time to expose different metrics in different ways. You can use
// separate registries for testing purposes. // separate registries for testing purposes.
// //
// Also note that the DefaultRegistry comes registered with a Collector for Go // Also note that the DefaultRegisterer comes registered with a Collector for Go
// runtime metrics (via NewGoCollector) and a Collector for process metrics (via // runtime metrics (via NewGoCollector) and a Collector for process metrics (via
// NewProcessCollector). With a custom registry, you are in control and decide // NewProcessCollector). With a custom registry, you are in control and decide
// yourself about the Collectors to register. // yourself about the Collectors to register.
@ -166,16 +182,20 @@
// The Registry implements the Gatherer interface. The caller of the Gather // The Registry implements the Gatherer interface. The caller of the Gather
// method can then expose the gathered metrics in some way. Usually, the metrics // method can then expose the gathered metrics in some way. Usually, the metrics
// are served via HTTP on the /metrics endpoint. That's happening in the example // are served via HTTP on the /metrics endpoint. That's happening in the example
// above. The tools to expose metrics via HTTP are in the promhttp // above. The tools to expose metrics via HTTP are in the promhttp sub-package.
// sub-package. (The top-level functions in the prometheus package are // (The top-level functions in the prometheus package are deprecated.)
// deprecated.)
// //
// Pushing to the Pushgateway // Pushing to the Pushgateway
// //
// Function for pushing to the Pushgateway can be found in the push sub-package. // Function for pushing to the Pushgateway can be found in the push sub-package.
// //
// Graphite Bridge
//
// Functions and examples to push metrics from a Gatherer to Graphite can be
// found in the graphite sub-package.
//
// Other Means of Exposition // Other Means of Exposition
// //
// More ways of exposing metrics can easily be added. Sending metrics to // More ways of exposing metrics can easily be added by following the approaches
// Graphite would be an example that will soon be implemented. // of the existing implementations.
package prometheus package prometheus
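The package comment above points readers to custom registries and the promhttp sub-package. A minimal sketch of serving a custom (non-default) registry, with an invented gauge:

package main

import (
    "log"
    "net/http"

    "github.com/prometheus/client_golang/prometheus"
    "github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
    // A custom registry avoids the global DefaultRegisterer and the Go and
    // process collectors that come pre-registered with it.
    reg := prometheus.NewRegistry()

    jobsQueued := prometheus.NewGauge(prometheus.GaugeOpts{
        Name: "example_jobs_queued",
        Help: "Number of jobs currently queued.",
    })
    reg.MustRegister(jobsQueued)
    jobsQueued.Set(7)

    // Expose only what this registry gathers.
    http.Handle("/metrics", promhttp.HandlerFor(reg, promhttp.HandlerOpts{}))
    log.Fatal(http.ListenAndServe(":8080", nil))
}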

@ -17,18 +17,18 @@ import "github.com/prometheus/client_golang/prometheus"
// ClusterManager is an example for a system that might have been built without // ClusterManager is an example for a system that might have been built without
// Prometheus in mind. It models a central manager of jobs running in a // Prometheus in mind. It models a central manager of jobs running in a
// cluster. To turn it into something that collects Prometheus metrics, we // cluster. Thus, we implement a custom Collector called
// simply add the two methods required for the Collector interface. // ClusterManagerCollector, which collects information from a ClusterManager
// using its provided methods and turns them into Prometheus Metrics for
// collection.
// //
// An additional challenge is that multiple instances of the ClusterManager are // An additional challenge is that multiple instances of the ClusterManager are
// run within the same binary, each in charge of a different zone. We need to // run within the same binary, each in charge of a different zone. We need to
// make use of ConstLabels to be able to register each ClusterManager instance // make use of wrapping Registerers to be able to register each
// with Prometheus. // ClusterManagerCollector instance with Prometheus.
type ClusterManager struct { type ClusterManager struct {
Zone string Zone string
OOMCountDesc *prometheus.Desc // Contains many more fields not listed in this example.
RAMUsageDesc *prometheus.Desc
// ... many more fields
} }
// ReallyExpensiveAssessmentOfTheSystemState is a mock for the data gathering a // ReallyExpensiveAssessmentOfTheSystemState is a mock for the data gathering a
@ -50,10 +50,30 @@ func (c *ClusterManager) ReallyExpensiveAssessmentOfTheSystemState() (
return return
} }
// Describe simply sends the two Descs in the struct to the channel. // ClusterManagerCollector implements the Collector interface.
func (c *ClusterManager) Describe(ch chan<- *prometheus.Desc) { type ClusterManagerCollector struct {
ch <- c.OOMCountDesc ClusterManager *ClusterManager
ch <- c.RAMUsageDesc }
// Descriptors used by the ClusterManagerCollector below.
var (
oomCountDesc = prometheus.NewDesc(
"clustermanager_oom_crashes_total",
"Number of OOM crashes.",
[]string{"host"}, nil,
)
ramUsageDesc = prometheus.NewDesc(
"clustermanager_ram_usage_bytes",
"RAM usage as reported to the cluster manager.",
[]string{"host"}, nil,
)
)
// Describe is implemented with DescribeByCollect. That's possible because the
// Collect method will always return the same two metrics with the same two
// descriptors.
func (cc ClusterManagerCollector) Describe(ch chan<- *prometheus.Desc) {
prometheus.DescribeByCollect(cc, ch)
} }
// Collect first triggers the ReallyExpensiveAssessmentOfTheSystemState. Then it // Collect first triggers the ReallyExpensiveAssessmentOfTheSystemState. Then it
@ -61,11 +81,11 @@ func (c *ClusterManager) Describe(ch chan<- *prometheus.Desc) {
// //
// Note that Collect could be called concurrently, so we depend on // Note that Collect could be called concurrently, so we depend on
// ReallyExpensiveAssessmentOfTheSystemState to be concurrency-safe. // ReallyExpensiveAssessmentOfTheSystemState to be concurrency-safe.
func (c *ClusterManager) Collect(ch chan<- prometheus.Metric) { func (cc ClusterManagerCollector) Collect(ch chan<- prometheus.Metric) {
oomCountByHost, ramUsageByHost := c.ReallyExpensiveAssessmentOfTheSystemState() oomCountByHost, ramUsageByHost := cc.ClusterManager.ReallyExpensiveAssessmentOfTheSystemState()
for host, oomCount := range oomCountByHost { for host, oomCount := range oomCountByHost {
ch <- prometheus.MustNewConstMetric( ch <- prometheus.MustNewConstMetric(
c.OOMCountDesc, oomCountDesc,
prometheus.CounterValue, prometheus.CounterValue,
float64(oomCount), float64(oomCount),
host, host,
@ -73,7 +93,7 @@ func (c *ClusterManager) Collect(ch chan<- prometheus.Metric) {
} }
for host, ramUsage := range ramUsageByHost { for host, ramUsage := range ramUsageByHost {
ch <- prometheus.MustNewConstMetric( ch <- prometheus.MustNewConstMetric(
c.RAMUsageDesc, ramUsageDesc,
prometheus.GaugeValue, prometheus.GaugeValue,
ramUsage, ramUsage,
host, host,
@ -81,38 +101,27 @@ func (c *ClusterManager) Collect(ch chan<- prometheus.Metric) {
} }
} }
// NewClusterManager creates the two Descs OOMCountDesc and RAMUsageDesc. Note // NewClusterManager first creates a Prometheus-ignorant ClusterManager
// that the zone is set as a ConstLabel. (It's different in each instance of the // instance. Then, it creates a ClusterManagerCollector for the just created
// ClusterManager, but constant over the lifetime of an instance.) Then there is // ClusterManager. Finally, it registers the ClusterManagerCollector with a
// a variable label "host", since we want to partition the collected metrics by // wrapping Registerer that adds the zone as a label. In this way, the metrics
// host. Since all Descs created in this way are consistent across instances, // collected by different ClusterManagerCollectors do not collide.
// with a guaranteed distinction by the "zone" label, we can register different func NewClusterManager(zone string, reg prometheus.Registerer) *ClusterManager {
// ClusterManager instances with the same registry. c := &ClusterManager{
func NewClusterManager(zone string) *ClusterManager {
return &ClusterManager{
Zone: zone, Zone: zone,
OOMCountDesc: prometheus.NewDesc(
"clustermanager_oom_crashes_total",
"Number of OOM crashes.",
[]string{"host"},
prometheus.Labels{"zone": zone},
),
RAMUsageDesc: prometheus.NewDesc(
"clustermanager_ram_usage_bytes",
"RAM usage as reported to the cluster manager.",
[]string{"host"},
prometheus.Labels{"zone": zone},
),
} }
cc := ClusterManagerCollector{ClusterManager: c}
prometheus.WrapRegistererWith(prometheus.Labels{"zone": zone}, reg).MustRegister(cc)
return c
} }
func ExampleCollector() { func ExampleCollector() {
workerDB := NewClusterManager("db")
workerCA := NewClusterManager("ca")
// Since we are dealing with custom Collector implementations, it might // Since we are dealing with custom Collector implementations, it might
// be a good idea to try it out with a pedantic registry. // be a good idea to try it out with a pedantic registry.
reg := prometheus.NewPedanticRegistry() reg := prometheus.NewPedanticRegistry()
reg.MustRegister(workerDB)
reg.MustRegister(workerCA) // Construct cluster managers. In real code, we would assign them to
// variables to then do something with them.
NewClusterManager("db", reg)
NewClusterManager("ca", reg)
} }
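The rewritten example above keeps a checked Collector whose Describe is implemented via DescribeByCollect. For contrast, the package comment in this diff also allows “unchecked” Collectors, whose Describe sends nothing and whose consistency is only verified at scrape time. A hedged sketch of that variant with invented metric names:

package main

import "github.com/prometheus/client_golang/prometheus"

// dynamicCollector illustrates an unchecked Collector: Describe sends no
// descriptors, so no checks happen at registration time.
type dynamicCollector struct{}

func (dynamicCollector) Describe(chan<- *prometheus.Desc) {}

func (dynamicCollector) Collect(ch chan<- prometheus.Metric) {
    // Metrics whose exact shape is only known at collect time.
    ch <- prometheus.MustNewConstMetric(
        prometheus.NewDesc("example_discovered_items", "Items discovered at scrape time.", nil, nil),
        prometheus.GaugeValue, 3,
    )
}

func main() {
    reg := prometheus.NewRegistry()
    reg.MustRegister(dynamicCollector{})
}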

@ -0,0 +1,71 @@
// Copyright 2014 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package prometheus_test
import (
"net/http"
"github.com/prometheus/client_golang/prometheus"
)
var (
// apiRequestDuration tracks the duration separate for each HTTP status
// class (1xx, 2xx, ...). This creates a fair amount of time series on
// the Prometheus server. Usually, you would track the duration of
// serving HTTP requests without partitioning by outcome. Do something
// like this only if needed. Also note how only status classes are
// tracked, not every single status code. The latter would create an
// even larger amount of time series. Request counters partitioned by
// status code are usually OK as each counter only creates one time
// series. Histograms are way more expensive, so partition with care and
// only where you really need separate latency tracking. Partitioning by
// status class is only an example. In concrete cases, other partitions
// might make more sense.
apiRequestDuration = prometheus.NewHistogramVec(
prometheus.HistogramOpts{
Name: "api_request_duration_seconds",
Help: "Histogram for the request duration of the public API, partitioned by status class.",
Buckets: prometheus.ExponentialBuckets(0.1, 1.5, 5),
},
[]string{"status_class"},
)
)
func handler(w http.ResponseWriter, r *http.Request) {
status := http.StatusOK
// The ObserverFunc gets called by the deferred ObserveDuration and
// decides which Histogram's Observe method is called.
timer := prometheus.NewTimer(prometheus.ObserverFunc(func(v float64) {
switch {
case status >= 500: // Server error.
apiRequestDuration.WithLabelValues("5xx").Observe(v)
case status >= 400: // Client error.
apiRequestDuration.WithLabelValues("4xx").Observe(v)
case status >= 300: // Redirection.
apiRequestDuration.WithLabelValues("3xx").Observe(v)
case status >= 200: // Success.
apiRequestDuration.WithLabelValues("2xx").Observe(v)
default: // Informational.
apiRequestDuration.WithLabelValues("1xx").Observe(v)
}
}))
defer timer.ObserveDuration()
// Handle the request. Set status accordingly.
// ...
}
func ExampleTimer_complex() {
http.HandleFunc("/api", handler)
}

@ -0,0 +1,48 @@
// Copyright 2014 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package prometheus_test
import (
"os"
"github.com/prometheus/client_golang/prometheus"
)
var (
// If a function is called rarely (i.e. not more often than scrapes
// happen) or ideally only once (like in a batch job), it can make sense
// to use a Gauge for timing the function call. For timing a batch job
// and pushing the result to a Pushgateway, see also the comprehensive
// example in the push package.
funcDuration = prometheus.NewGauge(prometheus.GaugeOpts{
Name: "example_function_duration_seconds",
Help: "Duration of the last call of an example function.",
})
)
func run() error {
// The Set method of the Gauge is used to observe the duration.
timer := prometheus.NewTimer(prometheus.ObserverFunc(funcDuration.Set))
defer timer.ObserveDuration()
// Do something. Return errors as encountered. The use of 'defer' above
// makes sure the function is still timed properly.
return nil
}
func ExampleTimer_gauge() {
if err := run(); err != nil {
os.Exit(1)
}
}

@ -0,0 +1,40 @@
// Copyright 2014 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package prometheus_test
import (
"math/rand"
"time"
"github.com/prometheus/client_golang/prometheus"
)
var (
requestDuration = prometheus.NewHistogram(prometheus.HistogramOpts{
Name: "example_request_duration_seconds",
Help: "Histogram for the runtime of a simple example function.",
Buckets: prometheus.LinearBuckets(0.01, 0.01, 10),
})
)
func ExampleTimer() {
// timer times this example function. It uses a Histogram, but a Summary
// would also work, as both implement Observer. Check out
// https://prometheus.io/docs/practices/histograms/ for differences.
timer := prometheus.NewTimer(requestDuration)
defer timer.ObserveDuration()
// Do something here that takes time.
time.Sleep(time.Duration(rand.NormFloat64()*10000+50000) * time.Microsecond)
}

@ -19,13 +19,13 @@ import (
"math" "math"
"net/http" "net/http"
"runtime" "runtime"
"sort"
"strings" "strings"
"time"
dto "github.com/prometheus/client_model/go" "github.com/golang/protobuf/proto"
"github.com/prometheus/common/expfmt" "github.com/prometheus/common/expfmt"
"github.com/golang/protobuf/proto" dto "github.com/prometheus/client_model/go"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
) )
@ -89,37 +89,6 @@ func ExampleGaugeFunc() {
// GaugeFunc 'goroutines_count' registered. // GaugeFunc 'goroutines_count' registered.
} }
func ExampleCounter() {
pushCounter := prometheus.NewCounter(prometheus.CounterOpts{
Name: "repository_pushes", // Note: No help string...
})
err := prometheus.Register(pushCounter) // ... so this will return an error.
if err != nil {
fmt.Println("Push counter couldn't be registered, no counting will happen:", err)
return
}
// Try it once more, this time with a help string.
pushCounter = prometheus.NewCounter(prometheus.CounterOpts{
Name: "repository_pushes",
Help: "Number of pushes to external repository.",
})
err = prometheus.Register(pushCounter)
if err != nil {
fmt.Println("Push counter couldn't be registered AGAIN, no counting will happen:", err)
return
}
pushComplete := make(chan struct{})
// TODO: Start a goroutine that performs repository pushes and reports
// each completion via the channel.
for _ = range pushComplete {
pushCounter.Inc()
}
// Output:
// Push counter couldn't be registered, no counting will happen: descriptor Desc{fqName: "repository_pushes", help: "", constLabels: {}, variableLabels: []} is invalid: empty help string
}
func ExampleCounterVec() { func ExampleCounterVec() {
httpReqs := prometheus.NewCounterVec( httpReqs := prometheus.NewCounterVec(
prometheus.CounterOpts{ prometheus.CounterOpts{
@ -167,19 +136,6 @@ func ExampleInstrumentHandler() {
)) ))
} }
func ExampleLabelPairSorter() {
labelPairs := []*dto.LabelPair{
&dto.LabelPair{Name: proto.String("status"), Value: proto.String("404")},
&dto.LabelPair{Name: proto.String("method"), Value: proto.String("get")},
}
sort.Sort(prometheus.LabelPairSorter(labelPairs))
fmt.Println(labelPairs)
// Output:
// [name:"method" value:"get" name:"status" value:"404" ]
}
func ExampleRegister() { func ExampleRegister() {
// Imagine you have a worker pool and want to count the tasks completed. // Imagine you have a worker pool and want to count the tasks completed.
taskCounter := prometheus.NewCounter(prometheus.CounterOpts{ taskCounter := prometheus.NewCounter(prometheus.CounterOpts{
@ -334,8 +290,9 @@ func ExampleRegister() {
func ExampleSummary() { func ExampleSummary() {
temps := prometheus.NewSummary(prometheus.SummaryOpts{ temps := prometheus.NewSummary(prometheus.SummaryOpts{
Name: "pond_temperature_celsius", Name: "pond_temperature_celsius",
Help: "The temperature of the frog pond.", // Sorry, we can't measure how badly it smells. Help: "The temperature of the frog pond.",
Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001},
}) })
// Simulate some observations. // Simulate some observations.
@ -372,8 +329,9 @@ func ExampleSummary() {
func ExampleSummaryVec() { func ExampleSummaryVec() {
temps := prometheus.NewSummaryVec( temps := prometheus.NewSummaryVec(
prometheus.SummaryOpts{ prometheus.SummaryOpts{
Name: "pond_temperature_celsius", Name: "pond_temperature_celsius",
Help: "The temperature of the frog pond.", // Sorry, we can't measure how badly it smells. Help: "The temperature of the frog pond.",
Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001},
}, },
[]string{"species"}, []string{"species"},
) )
@ -640,6 +598,7 @@ func ExampleAlreadyRegisteredError() {
panic(err) panic(err)
} }
} }
reqCounter.Inc()
} }
func ExampleGatherers() { func ExampleGatherers() {
@ -709,7 +668,7 @@ humidity_percent{location="inside"} 33.2
# HELP temperature_kelvin Temperature in Kelvin. # HELP temperature_kelvin Temperature in Kelvin.
# Duplicate metric: # Duplicate metric:
temperature_kelvin{location="outside"} 265.3 temperature_kelvin{location="outside"} 265.3
# Wrong labels: # Missing location label (note that this is undesirable but valid):
temperature_kelvin 4.5 temperature_kelvin 4.5
` `
@ -737,15 +696,47 @@ temperature_kelvin 4.5
// temperature_kelvin{location="outside"} 273.14 // temperature_kelvin{location="outside"} 273.14
// temperature_kelvin{location="somewhere else"} 4.5 // temperature_kelvin{location="somewhere else"} 4.5
// ---------- // ----------
// 2 error(s) occurred: // collected metric "temperature_kelvin" { label:<name:"location" value:"outside" > gauge:<value:265.3 > } was collected before with the same name and label values
// * collected metric temperature_kelvin label:<name:"location" value:"outside" > gauge:<value:265.3 > was collected before with the same name and label values
// * collected metric temperature_kelvin gauge:<value:4.5 > has label dimensions inconsistent with previously collected metrics in the same metric family
// # HELP humidity_percent Humidity in %. // # HELP humidity_percent Humidity in %.
// # TYPE humidity_percent gauge // # TYPE humidity_percent gauge
// humidity_percent{location="inside"} 33.2 // humidity_percent{location="inside"} 33.2
// humidity_percent{location="outside"} 45.4 // humidity_percent{location="outside"} 45.4
// # HELP temperature_kelvin Temperature in Kelvin. // # HELP temperature_kelvin Temperature in Kelvin.
// # TYPE temperature_kelvin gauge // # TYPE temperature_kelvin gauge
// temperature_kelvin 4.5
// temperature_kelvin{location="inside"} 298.44 // temperature_kelvin{location="inside"} 298.44
// temperature_kelvin{location="outside"} 273.14 // temperature_kelvin{location="outside"} 273.14
} }
func ExampleNewMetricWithTimestamp() {
desc := prometheus.NewDesc(
"temperature_kelvin",
"Current temperature in Kelvin.",
nil, nil,
)
// Create a constant gauge from values we got from an external
// temperature reporting system. Those values are reported with a slight
// delay, so we want to add the timestamp of the actual measurement.
temperatureReportedByExternalSystem := 298.15
timeReportedByExternalSystem := time.Date(2009, time.November, 10, 23, 0, 0, 12345678, time.UTC)
s := prometheus.NewMetricWithTimestamp(
timeReportedByExternalSystem,
prometheus.MustNewConstMetric(
desc, prometheus.GaugeValue, temperatureReportedByExternalSystem,
),
)
// Just for demonstration, let's check the state of the gauge by
// (ab)using its Write method (which is usually only used by Prometheus
// internally).
metric := &dto.Metric{}
s.Write(metric)
fmt.Println(proto.MarshalTextString(metric))
// Output:
// gauge: <
// value: 298.15
// >
// timestamp_ms: 1257894000012
}

@ -24,7 +24,7 @@ import (
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
) )
func ExampleExpvarCollector() { func ExampleNewExpvarCollector() {
expvarCollector := prometheus.NewExpvarCollector(map[string]*prometheus.Desc{ expvarCollector := prometheus.NewExpvarCollector(map[string]*prometheus.Desc{
"memstats": prometheus.NewDesc( "memstats": prometheus.NewDesc(
"expvar_memstats", "expvar_memstats",
@ -78,7 +78,7 @@ func ExampleExpvarCollector() {
close(metricChan) close(metricChan)
}() }()
for m := range metricChan { for m := range metricChan {
if strings.Index(m.Desc().String(), "expvar_memstats") == -1 { if !strings.Contains(m.Desc().String(), "expvar_memstats") {
metric.Reset() metric.Reset()
m.Write(&metric) m.Write(&metric)
metricStrings = append(metricStrings, metric.String()) metricStrings = append(metricStrings, metric.String())

@ -1,3 +1,16 @@
// Copyright 2018 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package prometheus package prometheus
// Inline and byte-free variant of hash/fnv's fnv64a. // Inline and byte-free variant of hash/fnv's fnv64a.

@ -13,6 +13,14 @@
package prometheus package prometheus
import (
"math"
"sync/atomic"
"time"
dto "github.com/prometheus/client_model/go"
)
// Gauge is a Metric that represents a single numerical value that can // Gauge is a Metric that represents a single numerical value that can
// arbitrarily go up and down. // arbitrarily go up and down.
// //
@ -27,29 +35,95 @@ type Gauge interface {
// Set sets the Gauge to an arbitrary value. // Set sets the Gauge to an arbitrary value.
Set(float64) Set(float64)
// Inc increments the Gauge by 1. // Inc increments the Gauge by 1. Use Add to increment it by arbitrary
// values.
Inc() Inc()
// Dec decrements the Gauge by 1. // Dec decrements the Gauge by 1. Use Sub to decrement it by arbitrary
// values.
Dec() Dec()
// Add adds the given value to the Gauge. (The value can be // Add adds the given value to the Gauge. (The value can be negative,
// negative, resulting in a decrease of the Gauge.) // resulting in a decrease of the Gauge.)
Add(float64) Add(float64)
// Sub subtracts the given value from the Gauge. (The value can be // Sub subtracts the given value from the Gauge. (The value can be
// negative, resulting in an increase of the Gauge.) // negative, resulting in an increase of the Gauge.)
Sub(float64) Sub(float64)
// SetToCurrentTime sets the Gauge to the current Unix time in seconds.
SetToCurrentTime()
} }
// GaugeOpts is an alias for Opts. See there for doc comments. // GaugeOpts is an alias for Opts. See there for doc comments.
type GaugeOpts Opts type GaugeOpts Opts
// NewGauge creates a new Gauge based on the provided GaugeOpts. // NewGauge creates a new Gauge based on the provided GaugeOpts.
//
// The returned implementation is optimized for a fast Set method. If you have a
// choice for managing the value of a Gauge via Set vs. Inc/Dec/Add/Sub, pick
// the former. For example, the Inc method of the returned Gauge is slower than
// the Inc method of a Counter returned by NewCounter. This matches the typical
// scenarios for Gauges and Counters, where the former tends to be Set-heavy and
// the latter Inc-heavy.
func NewGauge(opts GaugeOpts) Gauge { func NewGauge(opts GaugeOpts) Gauge {
return newValue(NewDesc( desc := NewDesc(
BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
opts.Help, opts.Help,
nil, nil,
opts.ConstLabels, opts.ConstLabels,
), GaugeValue, 0) )
result := &gauge{desc: desc, labelPairs: desc.constLabelPairs}
result.init(result) // Init self-collection.
return result
}
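The NewGauge comment above says the returned implementation favours Set over Inc/Dec/Add/Sub. A brief usage sketch with invented metric names, setting known values directly and using the newly added SetToCurrentTime:

package main

import "github.com/prometheus/client_golang/prometheus"

func main() {
    queueDepth := prometheus.NewGauge(prometheus.GaugeOpts{
        Name: "example_queue_depth",
        Help: "Current number of queued items.",
    })
    lastSync := prometheus.NewGauge(prometheus.GaugeOpts{
        Name: "example_last_sync_timestamp_seconds",
        Help: "Unix time of the last successful sync.",
    })
    prometheus.MustRegister(queueDepth, lastSync)

    queueDepth.Set(42)          // preferred: set the known value directly
    lastSync.SetToCurrentTime() // added to the Gauge interface in this change
    queueDepth.Dec()            // Inc/Dec/Add/Sub still work, just a bit slower here
}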
type gauge struct {
// valBits contains the bits of the represented float64 value. It has
// to go first in the struct to guarantee alignment for atomic
// operations. http://golang.org/pkg/sync/atomic/#pkg-note-BUG
valBits uint64
selfCollector
desc *Desc
labelPairs []*dto.LabelPair
}
func (g *gauge) Desc() *Desc {
return g.desc
}
func (g *gauge) Set(val float64) {
atomic.StoreUint64(&g.valBits, math.Float64bits(val))
}
func (g *gauge) SetToCurrentTime() {
g.Set(float64(time.Now().UnixNano()) / 1e9)
}
func (g *gauge) Inc() {
g.Add(1)
}
func (g *gauge) Dec() {
g.Add(-1)
}
func (g *gauge) Add(val float64) {
for {
oldBits := atomic.LoadUint64(&g.valBits)
newBits := math.Float64bits(math.Float64frombits(oldBits) + val)
if atomic.CompareAndSwapUint64(&g.valBits, oldBits, newBits) {
return
}
}
}
func (g *gauge) Sub(val float64) {
g.Add(val * -1)
}
func (g *gauge) Write(out *dto.Metric) error {
val := math.Float64frombits(atomic.LoadUint64(&g.valBits))
return populateMetric(GaugeValue, val, g.labelPairs, out)
} }
// GaugeVec is a Collector that bundles a set of Gauges that all share the same // GaugeVec is a Collector that bundles a set of Gauges that all share the same
@ -58,12 +132,11 @@ func NewGauge(opts GaugeOpts) Gauge {
// (e.g. number of operations queued, partitioned by user and operation // (e.g. number of operations queued, partitioned by user and operation
// type). Create instances with NewGaugeVec. // type). Create instances with NewGaugeVec.
type GaugeVec struct { type GaugeVec struct {
*MetricVec *metricVec
} }
// NewGaugeVec creates a new GaugeVec based on the provided GaugeOpts and // NewGaugeVec creates a new GaugeVec based on the provided GaugeOpts and
// partitioned by the given label names. At least one label name must be // partitioned by the given label names.
// provided.
func NewGaugeVec(opts GaugeOpts, labelNames []string) *GaugeVec { func NewGaugeVec(opts GaugeOpts, labelNames []string) *GaugeVec {
desc := NewDesc( desc := NewDesc(
BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
@ -72,28 +145,62 @@ func NewGaugeVec(opts GaugeOpts, labelNames []string) *GaugeVec {
opts.ConstLabels, opts.ConstLabels,
) )
return &GaugeVec{ return &GaugeVec{
MetricVec: newMetricVec(desc, func(lvs ...string) Metric { metricVec: newMetricVec(desc, func(lvs ...string) Metric {
return newValue(desc, GaugeValue, 0, lvs...) if len(lvs) != len(desc.variableLabels) {
panic(errInconsistentCardinality)
}
result := &gauge{desc: desc, labelPairs: makeLabelPairs(desc, lvs)}
result.init(result) // Init self-collection.
return result
}), }),
} }
} }
// GetMetricWithLabelValues replaces the method of the same name in // GetMetricWithLabelValues returns the Gauge for the given slice of label
// MetricVec. The difference is that this method returns a Gauge and not a // values (same order as the VariableLabels in Desc). If that combination of
// Metric so that no type conversion is required. // label values is accessed for the first time, a new Gauge is created.
func (m *GaugeVec) GetMetricWithLabelValues(lvs ...string) (Gauge, error) { //
metric, err := m.MetricVec.GetMetricWithLabelValues(lvs...) // It is possible to call this method without using the returned Gauge to only
// create the new Gauge but leave it at its starting value 0. See also the
// SummaryVec example.
//
// Keeping the Gauge for later use is possible (and should be considered if
// performance is critical), but keep in mind that Reset, DeleteLabelValues and
// Delete can be used to delete the Gauge from the GaugeVec. In that case, the
// Gauge will still exist, but it will not be exported anymore, even if a
// Gauge with the same label values is created later. See also the CounterVec
// example.
//
// An error is returned if the number of label values is not the same as the
// number of VariableLabels in Desc (minus any curried labels).
//
// Note that for more than one label value, this method is prone to mistakes
// caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as
// an alternative to avoid that type of mistake. For higher label numbers, the
// latter has a much more readable (albeit more verbose) syntax, but it comes
// with a performance overhead (for creating and processing the Labels map).
func (v *GaugeVec) GetMetricWithLabelValues(lvs ...string) (Gauge, error) {
metric, err := v.metricVec.getMetricWithLabelValues(lvs...)
if metric != nil { if metric != nil {
return metric.(Gauge), err return metric.(Gauge), err
} }
return nil, err return nil, err
} }
// GetMetricWith replaces the method of the same name in MetricVec. The // GetMetricWith returns the Gauge for the given Labels map (the label names
// difference is that this method returns a Gauge and not a Metric so that no // must match those of the VariableLabels in Desc). If that label map is
// type conversion is required. // accessed for the first time, a new Gauge is created. Implications of
func (m *GaugeVec) GetMetricWith(labels Labels) (Gauge, error) { // creating a Gauge without using it and keeping the Gauge for later use are
metric, err := m.MetricVec.GetMetricWith(labels) // the same as for GetMetricWithLabelValues.
//
// An error is returned if the number and names of the Labels are inconsistent
// with those of the VariableLabels in Desc (minus any curried labels).
//
// This method is used for the same purpose as
// GetMetricWithLabelValues(...string). See there for pros and cons of the two
// methods.
func (v *GaugeVec) GetMetricWith(labels Labels) (Gauge, error) {
metric, err := v.metricVec.getMetricWith(labels)
if metric != nil { if metric != nil {
return metric.(Gauge), err return metric.(Gauge), err
} }
@ -101,18 +208,57 @@ func (m *GaugeVec) GetMetricWith(labels Labels) (Gauge, error) {
} }
// WithLabelValues works as GetMetricWithLabelValues, but panics where // WithLabelValues works as GetMetricWithLabelValues, but panics where
// GetMetricWithLabelValues would have returned an error. By not returning an // GetMetricWithLabelValues would have returned an error. Not returning an
// error, WithLabelValues allows shortcuts like // error allows shortcuts like
// myVec.WithLabelValues("404", "GET").Add(42) // myVec.WithLabelValues("404", "GET").Add(42)
func (m *GaugeVec) WithLabelValues(lvs ...string) Gauge { func (v *GaugeVec) WithLabelValues(lvs ...string) Gauge {
return m.MetricVec.WithLabelValues(lvs...).(Gauge) g, err := v.GetMetricWithLabelValues(lvs...)
if err != nil {
panic(err)
}
return g
} }
// With works as GetMetricWith, but panics where GetMetricWithLabels would have
// returned an error. Not returning an error allows shortcuts like
//     myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Add(42)
func (v *GaugeVec) With(labels Labels) Gauge {
	g, err := v.GetMetricWith(labels)
	if err != nil {
		panic(err)
	}
	return g
}
// CurryWith returns a vector curried with the provided labels, i.e. the
// returned vector has those labels pre-set for all labeled operations performed
// on it. The cardinality of the curried vector is reduced accordingly. The
// order of the remaining labels stays the same (just with the curried labels
// taken out of the sequence which is relevant for the
// (GetMetric)WithLabelValues methods). It is possible to curry a curried
// vector, but only with labels not yet used for currying before.
//
// The metrics contained in the GaugeVec are shared between the curried and
// uncurried vectors. They are just accessed differently. Curried and uncurried
// vectors behave identically in terms of collection. Only one must be
// registered with a given registry (usually the uncurried version). The Reset
// method deletes all metrics, even if called on a curried vector.
func (v *GaugeVec) CurryWith(labels Labels) (*GaugeVec, error) {
vec, err := v.curryWith(labels)
if vec != nil {
return &GaugeVec{vec}, err
}
return nil, err
}
// MustCurryWith works as CurryWith but panics where CurryWith would have
// returned an error.
func (v *GaugeVec) MustCurryWith(labels Labels) *GaugeVec {
vec, err := v.CurryWith(labels)
if err != nil {
panic(err)
}
return vec
}
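A hedged usage sketch of the currying described above (the metric and label names are invented for the example): the curried vector fixes one label once, so later calls only supply the remaining ones, while only the uncurried vector is registered.

package main

import "github.com/prometheus/client_golang/prometheus"

func main() {
	vec := prometheus.NewGaugeVec(
		prometheus.GaugeOpts{Name: "example_jobs_in_flight", Help: "Example gauge."},
		[]string{"datacenter", "worker"},
	)
	prometheus.MustRegister(vec) // register only the uncurried vector

	// Fix the "datacenter" label once; the curried view only needs "worker".
	dc := vec.MustCurryWith(prometheus.Labels{"datacenter": "eu-1"})
	dc.WithLabelValues("worker-7").Inc()
}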
// GaugeFunc is a Gauge whose value is determined at collect time by calling a

@ -19,6 +19,7 @@ import (
"sync"
"testing"
"testing/quick"
"time"
dto "github.com/prometheus/client_model/go"
)
@ -82,7 +83,7 @@ func TestGaugeConcurrency(t *testing.T) {
}
start.Done()
if expected, got := <-result, math.Float64frombits(gge.(*gauge).valBits); math.Abs(expected-got) > 0.000001 {
t.Fatalf("expected approx. %f, got %f", expected, got)
return false
}
@ -146,7 +147,7 @@ func TestGaugeVecConcurrency(t *testing.T) {
start.Done()
for i := range sStreams {
if expected, got := <-results[i], math.Float64frombits(gge.WithLabelValues(string('A'+i)).(*gauge).valBits); math.Abs(expected-got) > 0.000001 {
t.Fatalf("expected approx. %f, got %f", expected, got)
return false
}
@ -180,3 +181,22 @@ func TestGaugeFunc(t *testing.T) {
t.Errorf("expected %q, got %q", expected, got)
}
}
func TestGaugeSetCurrentTime(t *testing.T) {
g := NewGauge(GaugeOpts{
Name: "test_name",
Help: "test help",
})
g.SetToCurrentTime()
unixTime := float64(time.Now().Unix())
m := &dto.Metric{}
g.Write(m)
delta := unixTime - m.GetGauge().GetValue()
// This is just a smoke test to make sure SetToCurrentTime is not
// totally off. Tests with current time involved are hard...
if math.Abs(delta) > 5 {
t.Errorf("Gauge set to current time deviates from current time by more than 5s, delta is %f seconds", delta)
}
}

@ -1,3 +1,16 @@
// Copyright 2018 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package prometheus
import (
@ -8,26 +21,39 @@
)
type goCollector struct {
goroutinesDesc *Desc
threadsDesc *Desc
gcDesc *Desc
goInfoDesc *Desc
// metrics to describe and collect
metrics memStatsMetrics
}
// NewGoCollector returns a collector which exports metrics about the current Go
// process. This includes memory stats. To collect those, runtime.ReadMemStats
// is called. This causes a stop-the-world, which is very short with Go1.9+
// (~25µs). However, with older Go versions, the stop-the-world duration depends
// on the heap size and can be quite significant (~1.7 ms/GiB as per
// https://go-review.googlesource.com/c/go/+/34937).
func NewGoCollector() Collector {
return &goCollector{
goroutinesDesc: NewDesc(
"go_goroutines",
"Number of goroutines that currently exist.",
nil, nil),
threadsDesc: NewDesc(
"go_threads",
"Number of OS threads created.",
nil, nil),
gcDesc: NewDesc(
"go_gc_duration_seconds",
"A summary of the GC invocation durations.",
nil, nil),
goInfoDesc: NewDesc(
"go_info",
"Information about the Go environment.",
nil, Labels{"version": runtime.Version()}),
metrics: memStatsMetrics{
{
desc: NewDesc(
@ -48,7 +74,7 @@ func NewGoCollector() Collector {
}, {
desc: NewDesc(
memstatNamespace("sys_bytes"),
"Number of bytes obtained from system.",
nil, nil,
),
eval: func(ms *runtime.MemStats) float64 { return float64(ms.Sys) },
@ -111,12 +137,12 @@ func NewGoCollector() Collector {
valType: GaugeValue,
}, {
desc: NewDesc(
memstatNamespace("heap_released_bytes"),
"Number of heap bytes released to OS.",
nil, nil,
),
eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapReleased) },
valType: GaugeValue,
}, {
desc: NewDesc(
memstatNamespace("heap_objects"),
@ -213,6 +239,14 @@ func NewGoCollector() Collector {
),
eval: func(ms *runtime.MemStats) float64 { return float64(ms.LastGC) / 1e9 },
valType: GaugeValue,
}, {
desc: NewDesc(
memstatNamespace("gc_cpu_fraction"),
"The fraction of this program's available CPU time used by the GC since the program started.",
nil, nil,
),
eval: func(ms *runtime.MemStats) float64 { return ms.GCCPUFraction },
valType: GaugeValue,
},
},
}
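A minimal usage sketch of the collector constructed above, registered with a dedicated registry and exposed via promhttp (the listen address is made up; note that the default registry already carries a Go collector, so a custom registry avoids a double registration):

package main

import (
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	// A dedicated registry avoids clashing with the Go collector that the
	// default registry already includes.
	reg := prometheus.NewRegistry()
	reg.MustRegister(prometheus.NewGoCollector())

	http.Handle("/metrics", promhttp.HandlerFor(reg, promhttp.HandlerOpts{}))
	http.ListenAndServe(":8080", nil)
}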
@ -224,9 +258,10 @@ func memstatNamespace(s string) string {
// Describe returns all descriptions of the collector.
func (c *goCollector) Describe(ch chan<- *Desc) {
ch <- c.goroutinesDesc
ch <- c.threadsDesc
ch <- c.gcDesc
ch <- c.goInfoDesc
for _, i := range c.metrics {
ch <- i.desc
}
@ -234,8 +269,9 @@ func (c *goCollector) Describe(ch chan<- *Desc) {
// Collect returns the current state of all metrics of the collector.
func (c *goCollector) Collect(ch chan<- Metric) {
ch <- MustNewConstMetric(c.goroutinesDesc, GaugeValue, float64(runtime.NumGoroutine()))
n, _ := runtime.ThreadCreateProfile(nil)
ch <- MustNewConstMetric(c.threadsDesc, GaugeValue, float64(n))
var stats debug.GCStats
stats.PauseQuantiles = make([]time.Duration, 5)
@ -246,7 +282,9 @@ func (c *goCollector) Collect(ch chan<- Metric) {
quantiles[float64(idx+1)/float64(len(stats.PauseQuantiles)-1)] = pq.Seconds()
}
quantiles[0.0] = stats.PauseQuantiles[0].Seconds()
ch <- MustNewConstSummary(c.gcDesc, uint64(stats.NumGC), stats.PauseTotal.Seconds(), quantiles)
ch <- MustNewConstMetric(c.goInfoDesc, GaugeValue, 1)
ms := &runtime.MemStats{}
runtime.ReadMemStats(ms)

@ -1,3 +1,16 @@
// Copyright 2018 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package prometheus
import (
@ -29,33 +42,37 @@ func TestGoCollector(t *testing.T) {
for {
select {
case m := <-ch:
// m can be Gauge or Counter,
// currently just test the go_goroutines Gauge
// and ignore others.
if m.Desc().fqName != "go_goroutines" {
continue
}
pb := &dto.Metric{}
m.Write(pb)
if pb.GetGauge() == nil {
continue
}
if old == -1 {
old = int(pb.GetGauge().GetValue())
close(waitc)
continue
}
if diff := int(pb.GetGauge().GetValue()) - old; diff != 1 {
// TODO: This is flaky in highly concurrent situations.
t.Errorf("want 1 new goroutine, got %d", diff)
}
// GoCollector performs three sends per call.
// On line 27 we need to receive three more sends
// to shut down cleanly.
<-ch
<-ch
<-ch
return
case <-time.After(1 * time.Second):
t.Fatalf("expected collect timed out")
}
@ -85,37 +102,33 @@ func TestGCCollector(t *testing.T) {
for {
select {
case metric := <-ch:
pb := &dto.Metric{}
metric.Write(pb)
if pb.GetSummary() == nil {
continue
}
if len(pb.GetSummary().Quantile) != 5 {
t.Errorf("expected 4 buckets, got %d", len(pb.GetSummary().Quantile))
}
for idx, want := range []float64{0.0, 0.25, 0.5, 0.75, 1.0} {
if *pb.GetSummary().Quantile[idx].Quantile != want {
t.Errorf("bucket #%d is off, got %f, want %f", idx, *pb.GetSummary().Quantile[idx].Quantile, want)
}
}
if first {
first = false
oldGC = *pb.GetSummary().SampleCount
oldPause = *pb.GetSummary().SampleSum
close(waitc)
continue
}
if diff := *pb.GetSummary().SampleCount - oldGC; diff != 1 {
t.Errorf("want 1 new garbage collection run, got %d", diff)
}
if diff := *pb.GetSummary().SampleSum - oldPause; diff <= 0 {
t.Errorf("want moar pause, got %f", diff)
}
return
case <-time.After(1 * time.Second):
t.Fatalf("expected collect timed out")
}

@ -0,0 +1,282 @@
// Copyright 2016 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package graphite provides a bridge to push Prometheus metrics to a Graphite
// server.
package graphite
import (
"bufio"
"errors"
"fmt"
"io"
"net"
"sort"
"time"
"github.com/prometheus/common/expfmt"
"github.com/prometheus/common/model"
"golang.org/x/net/context"
dto "github.com/prometheus/client_model/go"
"github.com/prometheus/client_golang/prometheus"
)
const (
defaultInterval = 15 * time.Second
millisecondsPerSecond = 1000
)
// HandlerErrorHandling defines how a Handler serving metrics will handle
// errors.
type HandlerErrorHandling int
// These constants cause handlers serving metrics to behave as described if
// errors are encountered.
const (
// Ignore errors and try to push as many metrics to Graphite as possible.
ContinueOnError HandlerErrorHandling = iota
// Abort the push to Graphite upon the first error encountered.
AbortOnError
)
// Config defines the Graphite bridge config.
type Config struct {
// The URL to push data to. Required.
URL string
// The prefix for the pushed Graphite metrics. Defaults to empty string.
Prefix string
// The interval to use for pushing data to Graphite. Defaults to 15 seconds.
Interval time.Duration
// The timeout for pushing metrics to Graphite. Defaults to 15 seconds.
Timeout time.Duration
// The Gatherer to use for metrics. Defaults to prometheus.DefaultGatherer.
Gatherer prometheus.Gatherer
// The logger that messages are written to. Defaults to no logging.
Logger Logger
// ErrorHandling defines how errors are handled. Note that errors are
// logged regardless of the configured ErrorHandling, provided Logger
// is not nil.
ErrorHandling HandlerErrorHandling
}
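A hedged sketch of a minimal configuration relying on the documented defaults (only URL is required; the address is made up, and the full push loop is shown in ExampleBridge further down):

package main

import "github.com/prometheus/client_golang/prometheus/graphite"

func main() {
	// Only URL is set; Interval and Timeout fall back to 15 seconds,
	// Gatherer to prometheus.DefaultGatherer, and Prefix to the empty string.
	b, err := graphite.NewBridge(&graphite.Config{
		URL: "graphite.example.org:2003", // made-up address
	})
	if err != nil {
		panic(err)
	}
	// One-off push; Run(ctx) would push on the configured interval instead.
	if err := b.Push(); err != nil {
		panic(err)
	}
}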
// Bridge pushes metrics to the configured Graphite server.
type Bridge struct {
url string
prefix string
interval time.Duration
timeout time.Duration
errorHandling HandlerErrorHandling
logger Logger
g prometheus.Gatherer
}
// Logger is the minimal interface Bridge needs for logging. Note that
// log.Logger from the standard library implements this interface, and it is
// easy to implement by custom loggers, if they don't do so already anyway.
type Logger interface {
Println(v ...interface{})
}
// NewBridge returns a pointer to a new Bridge struct.
func NewBridge(c *Config) (*Bridge, error) {
b := &Bridge{}
if c.URL == "" {
return nil, errors.New("missing URL")
}
b.url = c.URL
if c.Gatherer == nil {
b.g = prometheus.DefaultGatherer
} else {
b.g = c.Gatherer
}
if c.Logger != nil {
b.logger = c.Logger
}
if c.Prefix != "" {
b.prefix = c.Prefix
}
var z time.Duration
if c.Interval == z {
b.interval = defaultInterval
} else {
b.interval = c.Interval
}
if c.Timeout == z {
b.timeout = defaultInterval
} else {
b.timeout = c.Timeout
}
b.errorHandling = c.ErrorHandling
return b, nil
}
// Run starts the event loop that pushes Prometheus metrics to Graphite at the
// configured interval.
func (b *Bridge) Run(ctx context.Context) {
ticker := time.NewTicker(b.interval)
defer ticker.Stop()
for {
select {
case <-ticker.C:
if err := b.Push(); err != nil && b.logger != nil {
b.logger.Println("error pushing to Graphite:", err)
}
case <-ctx.Done():
return
}
}
}
// Push pushes Prometheus metrics to the configured Graphite server.
func (b *Bridge) Push() error {
mfs, err := b.g.Gather()
if err != nil || len(mfs) == 0 {
switch b.errorHandling {
case AbortOnError:
return err
case ContinueOnError:
if b.logger != nil {
b.logger.Println("continue on error:", err)
}
default:
panic("unrecognized error handling value")
}
}
conn, err := net.DialTimeout("tcp", b.url, b.timeout)
if err != nil {
return err
}
defer conn.Close()
return writeMetrics(conn, mfs, b.prefix, model.Now())
}
func writeMetrics(w io.Writer, mfs []*dto.MetricFamily, prefix string, now model.Time) error {
vec, err := expfmt.ExtractSamples(&expfmt.DecodeOptions{
Timestamp: now,
}, mfs...)
if err != nil {
return err
}
buf := bufio.NewWriter(w)
for _, s := range vec {
for _, c := range prefix {
if _, err := buf.WriteRune(c); err != nil {
return err
}
}
if err := buf.WriteByte('.'); err != nil {
return err
}
if err := writeMetric(buf, s.Metric); err != nil {
return err
}
if _, err := fmt.Fprintf(buf, " %g %d\n", s.Value, int64(s.Timestamp)/millisecondsPerSecond); err != nil {
return err
}
if err := buf.Flush(); err != nil {
return err
}
}
return nil
}
func writeMetric(buf *bufio.Writer, m model.Metric) error {
metricName, hasName := m[model.MetricNameLabel]
numLabels := len(m) - 1
if !hasName {
numLabels = len(m)
}
labelStrings := make([]string, 0, numLabels)
for label, value := range m {
if label != model.MetricNameLabel {
labelStrings = append(labelStrings, fmt.Sprintf("%s %s", string(label), string(value)))
}
}
var err error
switch numLabels {
case 0:
if hasName {
return writeSanitized(buf, string(metricName))
}
default:
sort.Strings(labelStrings)
if err = writeSanitized(buf, string(metricName)); err != nil {
return err
}
for _, s := range labelStrings {
if err = buf.WriteByte('.'); err != nil {
return err
}
if err = writeSanitized(buf, s); err != nil {
return err
}
}
}
return nil
}
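To make the resulting Graphite path concrete, here is a hypothetical same-package sketch in the style of the tests further down (the metric name and label are invented; it assumes "bytes", "bufio", and "fmt" are available in the file): labels become ".name.value" segments, and the space written by writeMetric is turned into a dot by writeSanitized.

// Hypothetical same-package sketch, not part of the bridge itself.
func demoMetricPath() string {
	var buf bytes.Buffer
	w := bufio.NewWriter(&buf)
	m := model.Metric{
		model.MetricNameLabel: "http_requests_total",
		"code":                "404",
	}
	if err := writeMetric(w, m); err != nil {
		return ""
	}
	w.Flush()
	return buf.String() // "http_requests_total.code.404"
}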
func writeSanitized(buf *bufio.Writer, s string) error {
prevUnderscore := false
for _, c := range s {
c = replaceInvalidRune(c)
if c == '_' {
if prevUnderscore {
continue
}
prevUnderscore = true
} else {
prevUnderscore = false
}
if _, err := buf.WriteRune(c); err != nil {
return err
}
}
return nil
}
func replaceInvalidRune(c rune) rune {
if c == ' ' {
return '.'
}
if !((c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') || c == '_' || c == ':' || c == '-' || (c >= '0' && c <= '9')) {
return '_'
}
return c
}

@ -0,0 +1,338 @@
// Copyright 2018 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package graphite
import (
"bufio"
"bytes"
"fmt"
"io"
"log"
"net"
"os"
"regexp"
"testing"
"time"
"github.com/prometheus/common/model"
"golang.org/x/net/context"
"github.com/prometheus/client_golang/prometheus"
)
func TestSanitize(t *testing.T) {
testCases := []struct {
in, out string
}{
{in: "hello", out: "hello"},
{in: "hE/l1o", out: "hE_l1o"},
{in: "he,*ll(.o", out: "he_ll_o"},
{in: "hello_there%^&", out: "hello_there_"},
{in: "hell-.o", out: "hell-_o"},
}
var buf bytes.Buffer
w := bufio.NewWriter(&buf)
for i, tc := range testCases {
if err := writeSanitized(w, tc.in); err != nil {
t.Fatalf("write failed: %v", err)
}
if err := w.Flush(); err != nil {
t.Fatalf("flush failed: %v", err)
}
if want, got := tc.out, buf.String(); want != got {
t.Fatalf("test case index %d: got sanitized string %s, want %s", i, got, want)
}
buf.Reset()
}
}
func TestWriteSummary(t *testing.T) {
sumVec := prometheus.NewSummaryVec(
prometheus.SummaryOpts{
Name: "name",
Help: "docstring",
ConstLabels: prometheus.Labels{"constname": "constvalue"},
Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001},
},
[]string{"labelname"},
)
sumVec.WithLabelValues("val1").Observe(float64(10))
sumVec.WithLabelValues("val1").Observe(float64(20))
sumVec.WithLabelValues("val1").Observe(float64(30))
sumVec.WithLabelValues("val2").Observe(float64(20))
sumVec.WithLabelValues("val2").Observe(float64(30))
sumVec.WithLabelValues("val2").Observe(float64(40))
reg := prometheus.NewRegistry()
reg.MustRegister(sumVec)
mfs, err := reg.Gather()
if err != nil {
t.Fatalf("error: %v", err)
}
testCases := []struct {
prefix string
}{
{prefix: "prefix"},
{prefix: "pre/fix"},
{prefix: "pre.fix"},
}
const want = `%s.name.constname.constvalue.labelname.val1.quantile.0_5 20 1477043
%s.name.constname.constvalue.labelname.val1.quantile.0_9 30 1477043
%s.name.constname.constvalue.labelname.val1.quantile.0_99 30 1477043
%s.name_sum.constname.constvalue.labelname.val1 60 1477043
%s.name_count.constname.constvalue.labelname.val1 3 1477043
%s.name.constname.constvalue.labelname.val2.quantile.0_5 30 1477043
%s.name.constname.constvalue.labelname.val2.quantile.0_9 40 1477043
%s.name.constname.constvalue.labelname.val2.quantile.0_99 40 1477043
%s.name_sum.constname.constvalue.labelname.val2 90 1477043
%s.name_count.constname.constvalue.labelname.val2 3 1477043
`
for i, tc := range testCases {
now := model.Time(1477043083)
var buf bytes.Buffer
err = writeMetrics(&buf, mfs, tc.prefix, now)
if err != nil {
t.Fatalf("error: %v", err)
}
wantWithPrefix := fmt.Sprintf(want,
tc.prefix, tc.prefix, tc.prefix, tc.prefix, tc.prefix,
tc.prefix, tc.prefix, tc.prefix, tc.prefix, tc.prefix,
)
if got := buf.String(); wantWithPrefix != got {
t.Fatalf("test case index %d: wanted \n%s\n, got \n%s\n", i, wantWithPrefix, got)
}
}
}
func TestWriteHistogram(t *testing.T) {
histVec := prometheus.NewHistogramVec(
prometheus.HistogramOpts{
Name: "name",
Help: "docstring",
ConstLabels: prometheus.Labels{"constname": "constvalue"},
Buckets: []float64{0.01, 0.02, 0.05, 0.1},
},
[]string{"labelname"},
)
histVec.WithLabelValues("val1").Observe(float64(10))
histVec.WithLabelValues("val1").Observe(float64(20))
histVec.WithLabelValues("val1").Observe(float64(30))
histVec.WithLabelValues("val2").Observe(float64(20))
histVec.WithLabelValues("val2").Observe(float64(30))
histVec.WithLabelValues("val2").Observe(float64(40))
reg := prometheus.NewRegistry()
reg.MustRegister(histVec)
mfs, err := reg.Gather()
if err != nil {
t.Fatalf("error: %v", err)
}
now := model.Time(1477043083)
var buf bytes.Buffer
err = writeMetrics(&buf, mfs, "prefix", now)
if err != nil {
t.Fatalf("error: %v", err)
}
want := `prefix.name_bucket.constname.constvalue.labelname.val1.le.0_01 0 1477043
prefix.name_bucket.constname.constvalue.labelname.val1.le.0_02 0 1477043
prefix.name_bucket.constname.constvalue.labelname.val1.le.0_05 0 1477043
prefix.name_bucket.constname.constvalue.labelname.val1.le.0_1 0 1477043
prefix.name_sum.constname.constvalue.labelname.val1 60 1477043
prefix.name_count.constname.constvalue.labelname.val1 3 1477043
prefix.name_bucket.constname.constvalue.labelname.val1.le._Inf 3 1477043
prefix.name_bucket.constname.constvalue.labelname.val2.le.0_01 0 1477043
prefix.name_bucket.constname.constvalue.labelname.val2.le.0_02 0 1477043
prefix.name_bucket.constname.constvalue.labelname.val2.le.0_05 0 1477043
prefix.name_bucket.constname.constvalue.labelname.val2.le.0_1 0 1477043
prefix.name_sum.constname.constvalue.labelname.val2 90 1477043
prefix.name_count.constname.constvalue.labelname.val2 3 1477043
prefix.name_bucket.constname.constvalue.labelname.val2.le._Inf 3 1477043
`
if got := buf.String(); want != got {
t.Fatalf("wanted \n%s\n, got \n%s\n", want, got)
}
}
func TestToReader(t *testing.T) {
cntVec := prometheus.NewCounterVec(
prometheus.CounterOpts{
Name: "name",
Help: "docstring",
ConstLabels: prometheus.Labels{"constname": "constvalue"},
},
[]string{"labelname"},
)
cntVec.WithLabelValues("val1").Inc()
cntVec.WithLabelValues("val2").Inc()
reg := prometheus.NewRegistry()
reg.MustRegister(cntVec)
want := `prefix.name.constname.constvalue.labelname.val1 1 1477043
prefix.name.constname.constvalue.labelname.val2 1 1477043
`
mfs, err := reg.Gather()
if err != nil {
t.Fatalf("error: %v", err)
}
now := model.Time(1477043083)
var buf bytes.Buffer
err = writeMetrics(&buf, mfs, "prefix", now)
if err != nil {
t.Fatalf("error: %v", err)
}
if got := buf.String(); want != got {
t.Fatalf("wanted \n%s\n, got \n%s\n", want, got)
}
}
func TestPush(t *testing.T) {
reg := prometheus.NewRegistry()
cntVec := prometheus.NewCounterVec(
prometheus.CounterOpts{
Name: "name",
Help: "docstring",
ConstLabels: prometheus.Labels{"constname": "constvalue"},
},
[]string{"labelname"},
)
cntVec.WithLabelValues("val1").Inc()
cntVec.WithLabelValues("val2").Inc()
reg.MustRegister(cntVec)
host := "localhost"
port := ":56789"
b, err := NewBridge(&Config{
URL: host + port,
Gatherer: reg,
Prefix: "prefix",
})
if err != nil {
t.Fatalf("error creating bridge: %v", err)
}
nmg, err := newMockGraphite(port)
if err != nil {
t.Fatalf("error creating mock graphite: %v", err)
}
defer nmg.Close()
err = b.Push()
if err != nil {
t.Fatalf("error pushing: %v", err)
}
wants := []string{
"prefix.name.constname.constvalue.labelname.val1 1",
"prefix.name.constname.constvalue.labelname.val2 1",
}
select {
case got := <-nmg.readc:
for _, want := range wants {
matched, err := regexp.MatchString(want, got)
if err != nil {
t.Fatalf("error pushing: %v", err)
}
if !matched {
t.Fatalf("missing metric:\nno match for %s received by server:\n%s", want, got)
}
}
return
case err := <-nmg.errc:
t.Fatalf("error reading push: %v", err)
case <-time.After(50 * time.Millisecond):
t.Fatalf("no result from graphite server")
}
}
func newMockGraphite(port string) (*mockGraphite, error) {
readc := make(chan string)
errc := make(chan error)
ln, err := net.Listen("tcp", port)
if err != nil {
return nil, err
}
go func() {
conn, err := ln.Accept()
if err != nil {
errc <- err
}
var b bytes.Buffer
io.Copy(&b, conn)
readc <- b.String()
}()
return &mockGraphite{
readc: readc,
errc: errc,
Listener: ln,
}, nil
}
type mockGraphite struct {
readc chan string
errc chan error
net.Listener
}
func ExampleBridge() {
b, err := NewBridge(&Config{
URL: "graphite.example.org:3099",
Gatherer: prometheus.DefaultGatherer,
Prefix: "prefix",
Interval: 15 * time.Second,
Timeout: 10 * time.Second,
ErrorHandling: AbortOnError,
Logger: log.New(os.Stdout, "graphite bridge: ", log.Lshortfile),
})
if err != nil {
panic(err)
}
go func() {
// Start something in a goroutine that uses metrics.
}()
// Push initial metrics to Graphite. Fail fast if the push fails.
if err := b.Push(); err != nil {
panic(err)
}
// Create a Context to control stopping the Run() loop that pushes
// metrics to Graphite.
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
// Start pushing metrics to Graphite in the Run() loop.
b.Run(ctx)
}

@ -16,7 +16,9 @@ package prometheus
import (
"fmt"
"math"
"runtime"
"sort"
"sync"
"sync/atomic"
"github.com/golang/protobuf/proto"
@ -108,8 +110,9 @@ func ExponentialBuckets(start, factor float64, count int) []float64 {
}
// HistogramOpts bundles the options for creating a Histogram metric. It is
// mandatory to set Name to a non-empty string. All other fields are optional
// and can safely be left at their zero value, although it is strongly
// encouraged to set a Help string.
type HistogramOpts struct {
// Namespace, Subsystem, and Name are components of the fully-qualified
// name of the Histogram (created by joining these components with
@ -120,29 +123,22 @@ type HistogramOpts struct {
Subsystem string
Name string
// Help provides information about this Histogram.
//
// Metrics with the same fully-qualified name must have the same Help
// string.
Help string
// ConstLabels are used to attach fixed labels to this metric. Metrics
// with the same fully-qualified name must have the same label names in
// their ConstLabels.
//
// ConstLabels are only used rarely. In particular, do not use them to
// attach the same labels to all your metrics. Those use cases are
// better covered by target labels set by the scraping Prometheus
// server, or by one specific metric (e.g. a build_info or a
// machine_role metric). See also
// https://prometheus.io/docs/instrumenting/writing_exporters/#target-labels,-not-static-scraped-labels
ConstLabels Labels
// Buckets defines the buckets into which observations are counted. Each
desc: desc, desc: desc,
upperBounds: opts.Buckets, upperBounds: opts.Buckets,
labelPairs: makeLabelPairs(desc, labelValues), labelPairs: makeLabelPairs(desc, labelValues),
counts: [2]*histogramCounts{&histogramCounts{}, &histogramCounts{}},
} }
for i, upperBound := range h.upperBounds { for i, upperBound := range h.upperBounds {
if i < len(h.upperBounds)-1 { if i < len(h.upperBounds)-1 {
@ -207,28 +204,53 @@ func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogr
} }
}
}
// Finally we know the final length of h.upperBounds and can make counts
// for both states:
h.counts[0].buckets = make([]uint64, len(h.upperBounds))
h.counts[1].buckets = make([]uint64, len(h.upperBounds))
h.init(h) // Init self-collection.
return h
}
type histogramCounts struct {
// sumBits contains the bits of the float64 representing the sum of all
// observations. sumBits and count have to go first in the struct to
// guarantee alignment for atomic operations.
// http://golang.org/pkg/sync/atomic/#pkg-note-BUG
sumBits uint64
count uint64
buckets []uint64
}
type histogram struct {
// countAndHotIdx is a complicated one. For lock-free yet atomic
// observations, we need to save the total count of observations again,
// combined with the index of the currently-hot counts struct, so that
// we can perform the operation on both values atomically. The least
// significant bit defines the hot counts struct. The remaining 63 bits
// represent the total count of observations. This happens under the
// assumption that the 63bit count will never overflow. Rationale: An
// observation takes about 30ns. Let's assume it could happen in
// 10ns. Overflowing the counter will then take at least (2^63)*10ns,
// which is about 3000 years.
//
// This has to be first in the struct for 64bit alignment. See
// http://golang.org/pkg/sync/atomic/#pkg-note-BUG
countAndHotIdx uint64
selfCollector
desc *Desc
writeMtx sync.Mutex // Only used in the Write method.
upperBounds []float64
// Two counts, one is "hot" for lock-free observations, the other is
// "cold" for writing out a dto.Metric. It has to be an array of
// pointers to guarantee 64bit alignment of the histogramCounts, see
// http://golang.org/pkg/sync/atomic/#pkg-note-BUG.
counts [2]*histogramCounts
hotIdx int // Index of currently-hot counts. Only used within Write.
labelPairs []*dto.LabelPair
}
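A standalone sketch of the bit-packing arithmetic described in the comment above (not the library's code, just the idea): the low bit selects the hot counts struct, the upper 63 bits carry the observation count, so adding 2 bumps the count without touching the index, and adding or subtracting 1 flips the hot index without touching the count.

package main

import "fmt"

func main() {
	var countAndHotIdx uint64 // low bit: hot index, upper 63 bits: count

	// Two observations: each adds 2, i.e. increments the 63-bit counter by 1.
	countAndHotIdx += 2
	countAndHotIdx += 2

	fmt.Println("hot index:", countAndHotIdx&1)     // 0
	fmt.Println("count so far:", countAndHotIdx>>1) // 2

	// A Write flips the hot index by adding (or subtracting) 1.
	countAndHotIdx++
	fmt.Println("hot index after flip:", countAndHotIdx&1) // 1
	fmt.Println("count unchanged:", countAndHotIdx>>1)     // 2
}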
@ -248,36 +270,113 @@ func (h *histogram) Observe(v float64) {
// 100 buckets: 78.1 ns/op linear - binary 54.9 ns/op
// 300 buckets: 154 ns/op linear - binary 61.6 ns/op
i := sort.SearchFloat64s(h.upperBounds, v)
// We increment h.countAndHotIdx by 2 so that the counter in the upper
// 63 bits gets incremented by 1. At the same time, we get the new value
// back, which we can use to find the currently-hot counts.
n := atomic.AddUint64(&h.countAndHotIdx, 2)
hotCounts := h.counts[n%2]
if i < len(h.upperBounds) {
atomic.AddUint64(&hotCounts.buckets[i], 1)
}
for {
oldBits := atomic.LoadUint64(&hotCounts.sumBits)
newBits := math.Float64bits(math.Float64frombits(oldBits) + v)
if atomic.CompareAndSwapUint64(&hotCounts.sumBits, oldBits, newBits) {
break
}
}
// Increment count last as we take it as a signal that the observation
// is complete.
atomic.AddUint64(&hotCounts.count, 1)
}
func (h *histogram) Write(out *dto.Metric) error {
var (
his = &dto.Histogram{}
buckets = make([]*dto.Bucket, len(h.upperBounds))
hotCounts, coldCounts *histogramCounts
count uint64
)
// For simplicity, we mutex the rest of this method. It is not in the
// hot path, i.e. Observe is called much more often than Write. The
// complication of making Write lock-free isn't worth it.
h.writeMtx.Lock()
defer h.writeMtx.Unlock()
// This is a bit arcane, which is why the following spells out this if
// clause in English:
//
// If the currently-hot counts struct is #0, we atomically increment
// h.countAndHotIdx by 1 so that from now on Observe will use the counts
// struct #1. Furthermore, the atomic increment gives us the new value,
// which, in its most significant 63 bits, tells us the count of
// observations done so far up to and including currently ongoing
// observations still using the counts struct just changed from hot to
// cold. To have a normal uint64 for the count, we bitshift by 1 and
// save the result in count. We also set h.hotIdx to 1 for the next
// Write call, and we will refer to counts #1 as hotCounts and to counts
// #0 as coldCounts.
//
// If the currently-hot counts struct is #1, we do the corresponding
// things the other way round. We have to _decrement_ h.countAndHotIdx
// (which is a bit arcane in itself, as we have to express -1 with an
// unsigned int...).
if h.hotIdx == 0 {
count = atomic.AddUint64(&h.countAndHotIdx, 1) >> 1
h.hotIdx = 1
hotCounts = h.counts[1]
coldCounts = h.counts[0]
} else {
count = atomic.AddUint64(&h.countAndHotIdx, ^uint64(0)) >> 1 // Decrement.
h.hotIdx = 0
hotCounts = h.counts[0]
coldCounts = h.counts[1]
}
// Now we have to wait for the now-declared-cold counts to actually cool
// down, i.e. wait for all observations still using it to finish. That's
// the case once the count in the cold counts struct is the same as the
// one atomically retrieved from the upper 63bits of h.countAndHotIdx.
for {
if count == atomic.LoadUint64(&coldCounts.count) {
break
}
runtime.Gosched() // Let observations get work done.
}
his.SampleCount = proto.Uint64(count)
his.SampleSum = proto.Float64(math.Float64frombits(atomic.LoadUint64(&coldCounts.sumBits)))
var cumCount uint64
for i, upperBound := range h.upperBounds {
cumCount += atomic.LoadUint64(&coldCounts.buckets[i])
buckets[i] = &dto.Bucket{
CumulativeCount: proto.Uint64(cumCount),
UpperBound: proto.Float64(upperBound),
}
}
his.Bucket = buckets
out.Histogram = his
out.Label = h.labelPairs
// Finally add all the cold counts to the new hot counts and reset the cold counts.
atomic.AddUint64(&hotCounts.count, count)
atomic.StoreUint64(&coldCounts.count, 0)
for {
oldBits := atomic.LoadUint64(&hotCounts.sumBits)
newBits := math.Float64bits(math.Float64frombits(oldBits) + his.GetSampleSum())
if atomic.CompareAndSwapUint64(&hotCounts.sumBits, oldBits, newBits) {
atomic.StoreUint64(&coldCounts.sumBits, 0)
break
}
}
for i := range h.upperBounds {
atomic.AddUint64(&hotCounts.buckets[i], atomic.LoadUint64(&coldCounts.buckets[i]))
atomic.StoreUint64(&coldCounts.buckets[i], 0)
}
return nil
}
@ -287,12 +386,11 @@ func (h *histogram) Write(out *dto.Metric) error {
// (e.g. HTTP request latencies, partitioned by status code and method). Create
// instances with NewHistogramVec.
type HistogramVec struct {
*metricVec
}
// NewHistogramVec creates a new HistogramVec based on the provided HistogramOpts and
// partitioned by the given label names.
func NewHistogramVec(opts HistogramOpts, labelNames []string) *HistogramVec {
desc := NewDesc(
BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
@ -301,47 +399,116 @@ func NewHistogramVec(opts HistogramOpts, labelNames []string) *HistogramVec {
opts.ConstLabels,
)
return &HistogramVec{
metricVec: newMetricVec(desc, func(lvs ...string) Metric {
return newHistogram(desc, opts, lvs...)
}),
}
}
// GetMetricWithLabelValues returns the Histogram for the given slice of label
// values (same order as the VariableLabels in Desc). If that combination of
// label values is accessed for the first time, a new Histogram is created.
//
// It is possible to call this method without using the returned Histogram to only
// create the new Histogram but leave it at its starting value, a Histogram without
// any observations.
//
// Keeping the Histogram for later use is possible (and should be considered if
// performance is critical), but keep in mind that Reset, DeleteLabelValues and
// Delete can be used to delete the Histogram from the HistogramVec. In that case, the
// Histogram will still exist, but it will not be exported anymore, even if a
// Histogram with the same label values is created later. See also the CounterVec
// example.
//
// An error is returned if the number of label values is not the same as the
// number of VariableLabels in Desc (minus any curried labels).
//
// Note that for more than one label value, this method is prone to mistakes
// caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as
// an alternative to avoid that type of mistake. For higher label numbers, the
// latter has a much more readable (albeit more verbose) syntax, but it comes
// with a performance overhead (for creating and processing the Labels map).
// See also the GaugeVec example.
func (v *HistogramVec) GetMetricWithLabelValues(lvs ...string) (Observer, error) {
metric, err := v.metricVec.getMetricWithLabelValues(lvs...)
if metric != nil {
return metric.(Observer), err
}
return nil, err
}
// GetMetricWith returns the Histogram for the given Labels map (the label names
// must match those of the VariableLabels in Desc). If that label map is
// accessed for the first time, a new Histogram is created. Implications of
// creating a Histogram without using it and keeping the Histogram for later use
// are the same as for GetMetricWithLabelValues.
//
// An error is returned if the number and names of the Labels are inconsistent
// with those of the VariableLabels in Desc (minus any curried labels).
//
// This method is used for the same purpose as
// GetMetricWithLabelValues(...string). See there for pros and cons of the two
// methods.
func (v *HistogramVec) GetMetricWith(labels Labels) (Observer, error) {
metric, err := v.metricVec.getMetricWith(labels)
if metric != nil {
return metric.(Observer), err
}
return nil, err
}
// WithLabelValues works as GetMetricWithLabelValues, but panics where
// GetMetricWithLabelValues would have returned an error. Not returning an
// error allows shortcuts like
//     myVec.WithLabelValues("404", "GET").Observe(42.21)
func (v *HistogramVec) WithLabelValues(lvs ...string) Observer {
h, err := v.GetMetricWithLabelValues(lvs...)
if err != nil {
panic(err)
}
return h
}
// With works as GetMetricWith but panics where GetMetricWithLabels would have
// returned an error. Not returning an error allows shortcuts like
// myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Observe(42.21)
func (v *HistogramVec) With(labels Labels) Observer {
h, err := v.GetMetricWith(labels)
if err != nil {
panic(err)
}
return h
}
// CurryWith returns a vector curried with the provided labels, i.e. the
// returned vector has those labels pre-set for all labeled operations performed
// on it. The cardinality of the curried vector is reduced accordingly. The
// order of the remaining labels stays the same (just with the curried labels
// taken out of the sequence which is relevant for the
// (GetMetric)WithLabelValues methods). It is possible to curry a curried
// vector, but only with labels not yet used for currying before.
//
// The metrics contained in the HistogramVec are shared between the curried and
// uncurried vectors. They are just accessed differently. Curried and uncurried
// vectors behave identically in terms of collection. Only one must be
// registered with a given registry (usually the uncurried version). The Reset
// method deletes all metrics, even if called on a curried vector.
func (v *HistogramVec) CurryWith(labels Labels) (ObserverVec, error) {
vec, err := v.curryWith(labels)
if vec != nil {
return &HistogramVec{vec}, err
}
return nil, err
}
// MustCurryWith works as CurryWith but panics where CurryWith would have
// returned an error.
func (v *HistogramVec) MustCurryWith(labels Labels) ObserverVec {
vec, err := v.CurryWith(labels)
if err != nil {
panic(err)
}
return vec
}
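A hedged usage sketch of the currying added above (metric and label names invented): because MustCurryWith returns an ObserverVec, the curried handle is immediately ready for Observe calls with the remaining labels.

package main

import "github.com/prometheus/client_golang/prometheus"

func main() {
	latencies := prometheus.NewHistogramVec(
		prometheus.HistogramOpts{
			Name:    "example_request_duration_seconds",
			Help:    "Example histogram.",
			Buckets: prometheus.DefBuckets,
		},
		[]string{"handler", "method"},
	)
	prometheus.MustRegister(latencies) // register only the uncurried vector

	// Pre-set the "handler" label; callers only pass "method".
	byHandler := latencies.MustCurryWith(prometheus.Labels{"handler": "search"})
	byHandler.WithLabelValues("GET").Observe(0.042)
}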
type constHistogram struct {
@ -393,7 +560,7 @@ func (h *constHistogram) Write(out *dto.Metric) error {
// bucket.
//
// NewConstHistogram returns an error if the length of labelValues is not
// consistent with the variable labels in Desc or if Desc is invalid.
func NewConstHistogram(
desc *Desc,
count uint64,
@ -401,8 +568,11 @@ func NewConstHistogram(
buckets map[float64]uint64,
labelValues ...string,
) (Metric, error) {
if desc.err != nil {
return nil, desc.err
}
if err := validateLabelValues(labelValues, len(desc.variableLabels)); err != nil {
return nil, err
}
return &constHistogram{
desc: desc,

@ -17,6 +17,7 @@ import (
"math" "math"
"math/rand" "math/rand"
"reflect" "reflect"
"runtime"
"sort" "sort"
"sync" "sync"
"testing" "testing"
@ -119,6 +120,28 @@ func BenchmarkHistogramWrite8(b *testing.B) {
benchmarkHistogramWrite(8, b)
}
func TestHistogramNonMonotonicBuckets(t *testing.T) {
testCases := map[string][]float64{
"not strictly monotonic": {1, 2, 2, 3},
"not monotonic at all": {1, 2, 4, 3, 5},
"have +Inf in the middle": {1, 2, math.Inf(+1), 3},
}
for name, buckets := range testCases {
func() {
defer func() {
if r := recover(); r == nil {
t.Errorf("Buckets %v are %s but NewHistogram did not panic.", buckets, name)
}
}()
_ = NewHistogram(HistogramOpts{
Name: "test_histogram",
Help: "helpless",
Buckets: buckets,
})
}()
}
}
// Intentionally adding +Inf here to test if that case is handled correctly.
// Also, getCumulativeCounts depends on it.
var testBuckets = []float64{-2, -1, -0.5, 0, 0.5, 1, 2, math.Inf(+1)}
@ -264,7 +287,7 @@ func TestHistogramVecConcurrency(t *testing.T) {
for i := 0; i < vecLength; i++ {
m := &dto.Metric{}
s := his.WithLabelValues(string('A' + i))
s.(Histogram).Write(m)
if got, want := len(m.Histogram.Bucket), len(testBuckets)-1; got != want {
t.Errorf("got %d buckets in protobuf, want %d", got, want)
@ -321,6 +344,50 @@ func TestBuckets(t *testing.T) {
got = ExponentialBuckets(100, 1.2, 3)
want = []float64{100, 120, 144}
if !reflect.DeepEqual(got, want) {
t.Errorf("exponential buckets: got %v, want %v", got, want)
}
}
func TestHistogramAtomicObserve(t *testing.T) {
var (
quit = make(chan struct{})
his = NewHistogram(HistogramOpts{
Buckets: []float64{0.5, 10, 20},
})
)
defer func() { close(quit) }()
observe := func() {
for {
select {
case <-quit:
return
default:
his.Observe(1)
}
}
}
go observe()
go observe()
go observe()
for i := 0; i < 100; i++ {
m := &dto.Metric{}
if err := his.Write(m); err != nil {
t.Fatal("unexpected error writing histogram:", err)
}
h := m.GetHistogram()
if h.GetSampleCount() != uint64(h.GetSampleSum()) ||
h.GetSampleCount() != h.GetBucket()[1].GetCumulativeCount() ||
h.GetSampleCount() != h.GetBucket()[2].GetCumulativeCount() {
t.Fatalf(
"inconsistent counts in histogram: count=%d sum=%f buckets=[%d, %d]",
h.GetSampleCount(), h.GetSampleSum(),
h.GetBucket()[1].GetCumulativeCount(), h.GetBucket()[2].GetCumulativeCount(),
)
}
runtime.Gosched()
}
}

@ -61,15 +61,15 @@ func giveBuf(buf *bytes.Buffer) {
// name).
//
// Deprecated: Please note the issues described in the doc comment of
// InstrumentHandler. You might want to consider using promhttp.Handler instead.
func Handler() http.Handler {
return InstrumentHandler("prometheus", UninstrumentedHandler())
}
// UninstrumentedHandler returns an HTTP handler for the DefaultGatherer.
//
// Deprecated: Use promhttp.HandlerFor(DefaultGatherer, promhttp.HandlerOpts{})
// instead. See there for further documentation.
func UninstrumentedHandler() http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
mfs, err := DefaultGatherer.Gather()
@ -95,7 +95,7 @@ func UninstrumentedHandler() http.Handler {
closer.Close()
}
if lastErr != nil && buf.Len() == 0 {
http.Error(w, "No metrics encoded, last error:\n\n"+lastErr.Error(), http.StatusInternalServerError)
return
}
header := w.Header()
@ -115,7 +115,7 @@ func decorateWriter(request *http.Request, writer io.Writer) (io.Writer, string)
header := request.Header.Get(acceptEncodingHeader)
parts := strings.Split(header, ",")
for _, part := range parts {
part = strings.TrimSpace(part)
if part == "gzip" || strings.HasPrefix(part, "gzip;") {
return gzip.NewWriter(writer), "gzip"
}
@ -139,16 +139,6 @@ var now nower = nowFunc(func() time.Time {
return time.Now()
})
func nowSeries(t ...time.Time) nower {
return nowFunc(func() time.Time {
defer func() {
t = t[1:]
}()
return t[0]
})
}
// InstrumentHandler wraps the given HTTP handler for instrumentation. It
// registers four metric collectors (if not already done) and reports HTTP
// metrics to the (newly or already) registered collectors: http_requests_total
@ -158,23 +148,16 @@ func nowSeries(t ...time.Time) nower {
// value. http_requests_total is a metric vector partitioned by HTTP method
// (label name "method") and HTTP status code (label name "code").
//
// Deprecated: InstrumentHandler has several issues. Use the tooling provided in
// package promhttp instead. The issues are the following: (1) It uses Summaries
// rather than Histograms. Summaries are not useful if aggregation across
// multiple instances is required. (2) It uses microseconds as unit, which is
// deprecated and should be replaced by seconds. (3) The size of the request is
// calculated in a separate goroutine. Since this calculator requires access to
// the request header, it creates a race with any writes to the header performed
// during request handling. httputil.ReverseProxy is a prominent example for a
// handler performing such writes. (4) It has additional issues with HTTP/2, cf.
// https://github.com/prometheus/client_golang/issues/272.
func InstrumentHandler(handlerName string, handler http.Handler) http.HandlerFunc {
return InstrumentHandlerFunc(handlerName, handler.ServeHTTP)
}
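Since the deprecation notice above points to package promhttp, here is a minimal sketch of the recommended replacement, assuming the promhttp middleware shipped in this repository (metric name, handler, and listen address are invented): a CounterVec partitioned by code and method plays the role of the old http_requests_total.

package main

import (
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	reqCnt := prometheus.NewCounterVec(
		prometheus.CounterOpts{
			Name: "example_http_requests_total",
			Help: "Example counter of handled HTTP requests.",
		},
		[]string{"code", "method"},
	)
	prometheus.MustRegister(reqCnt)

	handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte("ok"))
	})

	// Instrument with promhttp instead of the deprecated InstrumentHandler.
	http.Handle("/", promhttp.InstrumentHandlerCounter(reqCnt, handler))
	http.Handle("/metrics", promhttp.Handler())
	http.ListenAndServe(":8080", nil)
}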
@ -184,12 +167,13 @@ func InstrumentHandler(handlerName string, handler http.Handler) http.HandlerFun
// issues).
//
// Deprecated: InstrumentHandlerFunc is deprecated for the same reasons as
// InstrumentHandler is. Use the tooling provided in package promhttp instead.
func InstrumentHandlerFunc(handlerName string, handlerFunc func(http.ResponseWriter, *http.Request)) http.HandlerFunc {
return InstrumentHandlerFuncWithOpts(
SummaryOpts{
Subsystem: "http",
ConstLabels: Labels{"handler": handlerName},
Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001},
},
handlerFunc,
)
@ -222,7 +206,7 @@ func InstrumentHandlerFunc(handlerName string, handlerFunc func(http.ResponseWri
// SummaryOpts.
//
// Deprecated: InstrumentHandlerWithOpts is deprecated for the same reasons as
// InstrumentHandler is. Use the tooling provided in package promhttp instead.
func InstrumentHandlerWithOpts(opts SummaryOpts, handler http.Handler) http.HandlerFunc {
return InstrumentHandlerFuncWithOpts(opts, handler.ServeHTTP)
}
@ -233,7 +217,7 @@ func InstrumentHandlerWithOpts(opts SummaryOpts, handler http.Handler) http.Hand
// SummaryOpts are used. // SummaryOpts are used.
// //
// Deprecated: InstrumentHandlerFuncWithOpts is deprecated for the same reasons // Deprecated: InstrumentHandlerFuncWithOpts is deprecated for the same reasons
// as InstrumentHandler is. // as InstrumentHandler is. Use the tooling provided in package promhttp instead.
func InstrumentHandlerFuncWithOpts(opts SummaryOpts, handlerFunc func(http.ResponseWriter, *http.Request)) http.HandlerFunc { func InstrumentHandlerFuncWithOpts(opts SummaryOpts, handlerFunc func(http.ResponseWriter, *http.Request)) http.HandlerFunc {
reqCnt := NewCounterVec( reqCnt := NewCounterVec(
CounterOpts{ CounterOpts{
@ -245,34 +229,52 @@ func InstrumentHandlerFuncWithOpts(opts SummaryOpts, handlerFunc func(http.Respo
}, },
instLabels, instLabels,
) )
if err := Register(reqCnt); err != nil {
if are, ok := err.(AlreadyRegisteredError); ok {
reqCnt = are.ExistingCollector.(*CounterVec)
} else {
panic(err)
}
}
opts.Name = "request_duration_microseconds" opts.Name = "request_duration_microseconds"
opts.Help = "The HTTP request latencies in microseconds." opts.Help = "The HTTP request latencies in microseconds."
reqDur := NewSummary(opts) reqDur := NewSummary(opts)
if err := Register(reqDur); err != nil {
if are, ok := err.(AlreadyRegisteredError); ok {
reqDur = are.ExistingCollector.(Summary)
} else {
panic(err)
}
}
opts.Name = "request_size_bytes" opts.Name = "request_size_bytes"
opts.Help = "The HTTP request sizes in bytes." opts.Help = "The HTTP request sizes in bytes."
reqSz := NewSummary(opts) reqSz := NewSummary(opts)
if err := Register(reqSz); err != nil {
if are, ok := err.(AlreadyRegisteredError); ok {
reqSz = are.ExistingCollector.(Summary)
} else {
panic(err)
}
}
opts.Name = "response_size_bytes" opts.Name = "response_size_bytes"
opts.Help = "The HTTP response sizes in bytes." opts.Help = "The HTTP response sizes in bytes."
resSz := NewSummary(opts) resSz := NewSummary(opts)
regReqCnt := MustRegisterOrGet(reqCnt).(*CounterVec)
regReqDur := MustRegisterOrGet(reqDur).(Summary)
regReqSz := MustRegisterOrGet(reqSz).(Summary)
regResSz := MustRegisterOrGet(resSz).(Summary)
if err := Register(resSz); err != nil {
    if are, ok := err.(AlreadyRegisteredError); ok {
        resSz = are.ExistingCollector.(Summary)
    } else {
        panic(err)
    }
}
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
now := time.Now() now := time.Now()
delegate := &responseWriterDelegator{ResponseWriter: w} delegate := &responseWriterDelegator{ResponseWriter: w}
out := make(chan int)
out := computeApproximateRequestSize(r)
urlLen := 0
if r.URL != nil {
urlLen = len(r.URL.String())
}
go computeApproximateRequestSize(r, out, urlLen)
_, cn := w.(http.CloseNotifier) _, cn := w.(http.CloseNotifier)
_, fl := w.(http.Flusher) _, fl := w.(http.Flusher)
@ -290,39 +292,52 @@ func InstrumentHandlerFuncWithOpts(opts SummaryOpts, handlerFunc func(http.Respo
method := sanitizeMethod(r.Method) method := sanitizeMethod(r.Method)
code := sanitizeCode(delegate.status) code := sanitizeCode(delegate.status)
regReqCnt.WithLabelValues(method, code).Inc()
regReqDur.Observe(elapsed)
regResSz.Observe(float64(delegate.written))
regReqSz.Observe(float64(<-out))
reqCnt.WithLabelValues(method, code).Inc()
reqDur.Observe(elapsed)
resSz.Observe(float64(delegate.written))
reqSz.Observe(float64(<-out))
}) })
} }
func computeApproximateRequestSize(r *http.Request, out chan int, s int) {
    s += len(r.Method)
    s += len(r.Proto)
    for name, values := range r.Header {
        s += len(name)
        for _, value := range values {
            s += len(value)
        }
    }
    s += len(r.Host)
    // N.B. r.Form and r.MultipartForm are assumed to be included in r.URL.
    if r.ContentLength != -1 {
        s += int(r.ContentLength)
    }
    out <- s
}
func computeApproximateRequestSize(r *http.Request) <-chan int {
    // Get URL length in current goroutine for avoiding a race condition.
    // HandlerFunc that runs in parallel may modify the URL.
    s := 0
    if r.URL != nil {
        s += len(r.URL.String())
    }

    out := make(chan int, 1)
    go func() {
        s += len(r.Method)
        s += len(r.Proto)
        for name, values := range r.Header {
            s += len(name)
            for _, value := range values {
                s += len(value)
            }
        }
        s += len(r.Host)
        // N.B. r.Form and r.MultipartForm are assumed to be included in r.URL.
        if r.ContentLength != -1 {
            s += int(r.ContentLength)
        }
        out <- s
        close(out)
    }()
    return out
}
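The pattern above can be seen in isolation in this standalone sketch (not part of the diff; the function and URL are invented): read the racy part of the request synchronously, then total up the rest in a goroutine that delivers its result on a buffered channel so it never blocks.

package main

import (
    "fmt"
    "net/http"
)

// approximateSize reads r.URL up front (a handler running in parallel may
// mutate it) and computes the remaining byte count in a goroutine.
func approximateSize(r *http.Request) <-chan int {
    s := 0
    if r.URL != nil {
        s += len(r.URL.String())
    }
    out := make(chan int, 1) // buffered so the goroutine never blocks
    go func() {
        s += len(r.Method) + len(r.Proto) + len(r.Host)
        for name, values := range r.Header {
            s += len(name)
            for _, v := range values {
                s += len(v)
            }
        }
        if r.ContentLength != -1 {
            s += int(r.ContentLength)
        }
        out <- s
        close(out)
    }()
    return out
}

func main() {
    req, _ := http.NewRequest("GET", "http://example.com/hello?x=1", nil)
    fmt.Println(<-approximateSize(req))
}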
type responseWriterDelegator struct { type responseWriterDelegator struct {
http.ResponseWriter http.ResponseWriter
handler, method string status int
status int written int64
written int64 wroteHeader bool
wroteHeader bool
} }
func (r *responseWriterDelegator) WriteHeader(code int) { func (r *responseWriterDelegator) WriteHeader(code int) {

@ -29,6 +29,16 @@ func (b respBody) ServeHTTP(w http.ResponseWriter, r *http.Request) {
w.Write([]byte(b)) w.Write([]byte(b))
} }
func nowSeries(t ...time.Time) nower {
return nowFunc(func() time.Time {
defer func() {
t = t[1:]
}()
return t[0]
})
}
func TestInstrumentHandler(t *testing.T) { func TestInstrumentHandler(t *testing.T) {
defer func(n nower) { defer func(n nower) {
now = n.(nower) now = n.(nower)
@ -37,16 +47,17 @@ func TestInstrumentHandler(t *testing.T) {
instant := time.Now() instant := time.Now()
end := instant.Add(30 * time.Second) end := instant.Add(30 * time.Second)
now = nowSeries(instant, end) now = nowSeries(instant, end)
respBody := respBody("Howdy there!") body := respBody("Howdy there!")
hndlr := InstrumentHandler("test-handler", respBody) hndlr := InstrumentHandler("test-handler", body)
opts := SummaryOpts{ opts := SummaryOpts{
Subsystem: "http", Subsystem: "http",
ConstLabels: Labels{"handler": "test-handler"}, ConstLabels: Labels{"handler": "test-handler"},
Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001},
} }
reqCnt := MustRegisterOrGet(NewCounterVec( reqCnt := NewCounterVec(
CounterOpts{ CounterOpts{
Namespace: opts.Namespace, Namespace: opts.Namespace,
Subsystem: opts.Subsystem, Subsystem: opts.Subsystem,
@ -55,19 +66,51 @@ func TestInstrumentHandler(t *testing.T) {
ConstLabels: opts.ConstLabels, ConstLabels: opts.ConstLabels,
}, },
instLabels, instLabels,
)).(*CounterVec) )
err := Register(reqCnt)
if err == nil {
t.Fatal("expected reqCnt to be registered already")
}
if are, ok := err.(AlreadyRegisteredError); ok {
reqCnt = are.ExistingCollector.(*CounterVec)
} else {
t.Fatal("unexpected registration error:", err)
}
opts.Name = "request_duration_microseconds" opts.Name = "request_duration_microseconds"
opts.Help = "The HTTP request latencies in microseconds." opts.Help = "The HTTP request latencies in microseconds."
reqDur := MustRegisterOrGet(NewSummary(opts)).(Summary) reqDur := NewSummary(opts)
err = Register(reqDur)
if err == nil {
t.Fatal("expected reqDur to be registered already")
}
if are, ok := err.(AlreadyRegisteredError); ok {
reqDur = are.ExistingCollector.(Summary)
} else {
t.Fatal("unexpected registration error:", err)
}
opts.Name = "request_size_bytes" opts.Name = "request_size_bytes"
opts.Help = "The HTTP request sizes in bytes." opts.Help = "The HTTP request sizes in bytes."
MustRegisterOrGet(NewSummary(opts)) reqSz := NewSummary(opts)
err = Register(reqSz)
if err == nil {
t.Fatal("expected reqSz to be registered already")
}
if _, ok := err.(AlreadyRegisteredError); !ok {
t.Fatal("unexpected registration error:", err)
}
opts.Name = "response_size_bytes" opts.Name = "response_size_bytes"
opts.Help = "The HTTP response sizes in bytes." opts.Help = "The HTTP response sizes in bytes."
MustRegisterOrGet(NewSummary(opts)) resSz := NewSummary(opts)
err = Register(resSz)
if err == nil {
t.Fatal("expected resSz to be registered already")
}
if _, ok := err.(AlreadyRegisteredError); !ok {
t.Fatal("unexpected registration error:", err)
}
reqCnt.Reset() reqCnt.Reset()
@ -81,8 +124,8 @@ func TestInstrumentHandler(t *testing.T) {
if resp.Code != http.StatusTeapot { if resp.Code != http.StatusTeapot {
t.Fatalf("expected status %d, got %d", http.StatusTeapot, resp.Code) t.Fatalf("expected status %d, got %d", http.StatusTeapot, resp.Code)
} }
if string(resp.Body.Bytes()) != "Howdy there!" {
t.Fatalf("expected body %s, got %s", "Howdy there!", string(resp.Body.Bytes()))
if resp.Body.String() != "Howdy there!" {
t.Fatalf("expected body %s, got %s", "Howdy there!", resp.Body.String())
} }
out := &dto.Metric{} out := &dto.Metric{}
@ -95,7 +138,7 @@ func TestInstrumentHandler(t *testing.T) {
} }
out.Reset() out.Reset()
if want, got := 1, len(reqCnt.children); want != got { if want, got := 1, len(reqCnt.metricMap.metrics); want != got {
t.Errorf("want %d children in reqCnt, got %d", want, got) t.Errorf("want %d children in reqCnt, got %d", want, got)
} }
cnt, err := reqCnt.GetMetricWithLabelValues("get", "418") cnt, err := reqCnt.GetMetricWithLabelValues("get", "418")

@ -0,0 +1,85 @@
// Copyright 2018 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package internal
import (
"sort"
dto "github.com/prometheus/client_model/go"
)
// metricSorter is a sortable slice of *dto.Metric.
type metricSorter []*dto.Metric
func (s metricSorter) Len() int {
return len(s)
}
func (s metricSorter) Swap(i, j int) {
s[i], s[j] = s[j], s[i]
}
func (s metricSorter) Less(i, j int) bool {
if len(s[i].Label) != len(s[j].Label) {
// This should not happen. The metrics are
// inconsistent. However, we have to deal with the fact, as
// people might use custom collectors or metric family injection
// to create inconsistent metrics. So let's simply compare the
// number of labels in this case. That will still yield
// reproducible sorting.
return len(s[i].Label) < len(s[j].Label)
}
for n, lp := range s[i].Label {
vi := lp.GetValue()
vj := s[j].Label[n].GetValue()
if vi != vj {
return vi < vj
}
}
// We should never arrive here. Multiple metrics with the same
// label set in the same scrape will lead to undefined ingestion
// behavior. However, as above, we have to provide stable sorting
// here, even for inconsistent metrics. So sort equal metrics
// by their timestamp, with missing timestamps (implying "now")
// coming last.
if s[i].TimestampMs == nil {
return false
}
if s[j].TimestampMs == nil {
return true
}
return s[i].GetTimestampMs() < s[j].GetTimestampMs()
}
// NormalizeMetricFamilies returns a MetricFamily slice with empty
// MetricFamilies pruned and the remaining MetricFamilies sorted by name within
// the slice, with the contained Metrics sorted within each MetricFamily.
func NormalizeMetricFamilies(metricFamiliesByName map[string]*dto.MetricFamily) []*dto.MetricFamily {
for _, mf := range metricFamiliesByName {
sort.Sort(metricSorter(mf.Metric))
}
names := make([]string, 0, len(metricFamiliesByName))
for name, mf := range metricFamiliesByName {
if len(mf.Metric) > 0 {
names = append(names, name)
}
}
sort.Strings(names)
result := make([]*dto.MetricFamily, 0, len(names))
for _, name := range names {
result = append(result, metricFamiliesByName[name])
}
return result
}
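As a quick illustration of the pruning and sorting described above, here is a rough sketch that would have to sit alongside this code in the internal package; the family names and the local name helper are invented for illustration.

func sketchNormalize() []*dto.MetricFamily {
    name := func(s string) *string { return &s } // local helper instead of proto.String

    byName := map[string]*dto.MetricFamily{
        "b_total": {Name: name("b_total"), Metric: []*dto.Metric{{}}},
        "a_total": {Name: name("a_total"), Metric: []*dto.Metric{{}}},
        "unused":  {Name: name("unused")}, // no metrics, so it is pruned
    }
    // Result: the "a_total" family first, then "b_total"; "unused" is gone.
    return NormalizeMetricFamilies(byName)
}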

@ -0,0 +1,70 @@
// Copyright 2018 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package prometheus
import (
"errors"
"fmt"
"strings"
"unicode/utf8"
"github.com/prometheus/common/model"
)
// Labels represents a collection of label name -> value mappings. This type is
// commonly used with the With(Labels) and GetMetricWith(Labels) methods of
// metric vector Collectors, e.g.:
// myVec.With(Labels{"code": "404", "method": "GET"}).Add(42)
//
// The other use-case is the specification of constant label pairs in Opts or to
// create a Desc.
type Labels map[string]string
// reservedLabelPrefix is a prefix which is not legal in user-supplied
// label names.
const reservedLabelPrefix = "__"
var errInconsistentCardinality = errors.New("inconsistent label cardinality")
func validateValuesInLabels(labels Labels, expectedNumberOfValues int) error {
if len(labels) != expectedNumberOfValues {
return errInconsistentCardinality
}
for name, val := range labels {
if !utf8.ValidString(val) {
return fmt.Errorf("label %s: value %q is not valid UTF-8", name, val)
}
}
return nil
}
func validateLabelValues(vals []string, expectedNumberOfValues int) error {
if len(vals) != expectedNumberOfValues {
return errInconsistentCardinality
}
for _, val := range vals {
if !utf8.ValidString(val) {
return fmt.Errorf("label value %q is not valid UTF-8", val)
}
}
return nil
}
func checkLabelName(l string) bool {
return model.LabelName(l).IsValid() && !strings.HasPrefix(l, reservedLabelPrefix)
}
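A small in-package sketch of how these helpers behave; the wrapper function and the label names are invented for illustration:

func sketchLabelValidation() {
    // Two label values expected, two given, both valid UTF-8: err1 is nil.
    err1 := validateLabelValues([]string{"GET", "200"}, 2)

    // Cardinality mismatch: err2 is errInconsistentCardinality.
    err2 := validateValuesInLabels(Labels{"code": "200"}, 2)

    // Invalid UTF-8 in a value: err3 names the offending label.
    err3 := validateValuesInLabels(Labels{"path": string([]byte{0xff, 0xfe})}, 1)

    _, _, _ = err1, err2, err3
}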

@ -15,6 +15,9 @@ package prometheus
import ( import (
"strings" "strings"
"time"
"github.com/golang/protobuf/proto"
dto "github.com/prometheus/client_model/go" dto "github.com/prometheus/client_model/go"
) )
@ -43,9 +46,8 @@ type Metric interface {
// While populating dto.Metric, it is the responsibility of the // While populating dto.Metric, it is the responsibility of the
// implementation to ensure validity of the Metric protobuf (like valid // implementation to ensure validity of the Metric protobuf (like valid
// UTF-8 strings or syntactically valid metric and label names). It is // UTF-8 strings or syntactically valid metric and label names). It is
// recommended to sort labels lexicographically. (Implementers may find
// LabelPairSorter useful for that.) Callers of Write should still make
// sure of sorting if they depend on it.
// recommended to sort labels lexicographically. Callers of Write should
// still make sure of sorting if they depend on it.
Write(*dto.Metric) error Write(*dto.Metric) error
// TODO(beorn7): The original rationale of passing in a pre-allocated // TODO(beorn7): The original rationale of passing in a pre-allocated
// dto.Metric protobuf to save allocations has disappeared. The // dto.Metric protobuf to save allocations has disappeared. The
@ -57,8 +59,9 @@ type Metric interface {
// implementation XXX has its own XXXOpts type, but in most cases, it is just be // implementation XXX has its own XXXOpts type, but in most cases, it is just be
// an alias of this type (which might change when the requirement arises.) // an alias of this type (which might change when the requirement arises.)
// //
// It is mandatory to set Name and Help to a non-empty string. All other fields
// are optional and can safely be left at their zero value.
// It is mandatory to set Name to a non-empty string. All other fields are
// optional and can safely be left at their zero value, although it is strongly
// encouraged to set a Help string.
type Opts struct { type Opts struct {
// Namespace, Subsystem, and Name are components of the fully-qualified // Namespace, Subsystem, and Name are components of the fully-qualified
// name of the Metric (created by joining these components with // name of the Metric (created by joining these components with
@ -69,7 +72,7 @@ type Opts struct {
Subsystem string Subsystem string
Name string Name string
// Help provides information about this metric. Mandatory! // Help provides information about this metric.
// //
// Metrics with the same fully-qualified name must have the same Help // Metrics with the same fully-qualified name must have the same Help
// string. // string.
@ -79,20 +82,12 @@ type Opts struct {
// with the same fully-qualified name must have the same label names in // with the same fully-qualified name must have the same label names in
// their ConstLabels. // their ConstLabels.
// //
// Note that in most cases, labels have a value that varies during the
// lifetime of a process. Those labels are usually managed with a metric
// vector collector (like CounterVec, GaugeVec, UntypedVec). ConstLabels
// serve only special purposes. One is for the special case where the
// value of a label does not change during the lifetime of a process,
// e.g. if the revision of the running binary is put into a
// label. Another, more advanced purpose is if more than one Collector
// needs to collect Metrics with the same fully-qualified name. In that
// case, those Metrics must differ in the values of their
// ConstLabels. See the Collector examples.
//
// If the value of a label never changes (not even between binaries),
// that label most likely should not be a label at all (but part of the
// metric name).
// ConstLabels are only used rarely. In particular, do not use them to
// attach the same labels to all your metrics. Those use cases are
// better covered by target labels set by the scraping Prometheus
// server, or by one specific metric (e.g. a build_info or a
// machine_role metric). See also
// https://prometheus.io/docs/instrumenting/writing_exporters/#target-labels,-not-static-scraped-labels
ConstLabels Labels ConstLabels Labels
} }
@ -118,37 +113,22 @@ func BuildFQName(namespace, subsystem, name string) string {
return name return name
} }
// LabelPairSorter implements sort.Interface. It is used to sort a slice of
// dto.LabelPair pointers. This is useful for implementing the Write method of
// custom metrics.
type LabelPairSorter []*dto.LabelPair
// labelPairSorter implements sort.Interface. It is used to sort a slice of
// dto.LabelPair pointers.
type labelPairSorter []*dto.LabelPair
func (s LabelPairSorter) Len() int { func (s labelPairSorter) Len() int {
return len(s) return len(s)
} }
func (s LabelPairSorter) Swap(i, j int) { func (s labelPairSorter) Swap(i, j int) {
s[i], s[j] = s[j], s[i] s[i], s[j] = s[j], s[i]
} }
func (s LabelPairSorter) Less(i, j int) bool { func (s labelPairSorter) Less(i, j int) bool {
return s[i].GetName() < s[j].GetName() return s[i].GetName() < s[j].GetName()
} }
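For illustration, a minimal in-package sketch of sorting label pairs with this helper; it assumes the sort package is imported in this file, and the pair values are invented:

func sketchSortLabelPairs() []*dto.LabelPair {
    pairs := []*dto.LabelPair{
        {Name: proto.String("method"), Value: proto.String("GET")},
        {Name: proto.String("code"), Value: proto.String("200")},
    }
    sort.Sort(labelPairSorter(pairs)) // now ordered by name: "code", then "method"
    return pairs
}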
type hashSorter []uint64
func (s hashSorter) Len() int {
return len(s)
}
func (s hashSorter) Swap(i, j int) {
s[i], s[j] = s[j], s[i]
}
func (s hashSorter) Less(i, j int) bool {
return s[i] < s[j]
}
type invalidMetric struct { type invalidMetric struct {
desc *Desc desc *Desc
err error err error
@ -164,3 +144,31 @@ func NewInvalidMetric(desc *Desc, err error) Metric {
func (m *invalidMetric) Desc() *Desc { return m.desc } func (m *invalidMetric) Desc() *Desc { return m.desc }
func (m *invalidMetric) Write(*dto.Metric) error { return m.err } func (m *invalidMetric) Write(*dto.Metric) error { return m.err }
type timestampedMetric struct {
Metric
t time.Time
}
func (m timestampedMetric) Write(pb *dto.Metric) error {
e := m.Metric.Write(pb)
pb.TimestampMs = proto.Int64(m.t.Unix()*1000 + int64(m.t.Nanosecond()/1000000))
return e
}
// NewMetricWithTimestamp returns a new Metric wrapping the provided Metric in a
// way that it has an explicit timestamp set to the provided Time. This is only
// useful in rare cases as the timestamp of a Prometheus metric should usually
// be set by the Prometheus server during scraping. Exceptions include mirroring
// metrics with given timestamps from other metric
// sources.
//
// NewMetricWithTimestamp works best with MustNewConstMetric,
// MustNewConstHistogram, and MustNewConstSummary, see example.
//
// Currently, the exposition formats used by Prometheus are limited to
// millisecond resolution. Thus, the provided time will be rounded down to the
// next full millisecond value.
func NewMetricWithTimestamp(t time.Time, m Metric) Metric {
return timestampedMetric{Metric: m, t: t}
}
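A minimal in-package sketch of the mirroring use case mentioned above; the descriptor name, value, and timestamp are invented for illustration:

func sketchMirroredMetric(ch chan<- Metric) {
    desc := NewDesc(
        "mirrored_temperature_celsius",
        "Temperature mirrored from an external system, with its original timestamp.",
        nil, nil,
    )
    ch <- NewMetricWithTimestamp(
        time.Unix(1548871895, 0), // timestamp taken from the external source
        MustNewConstMetric(desc, GaugeValue, 21.5),
    )
}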

@ -0,0 +1,52 @@
// Copyright 2017 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package prometheus
// Observer is the interface that wraps the Observe method, which is used by
// Histogram and Summary to add observations.
type Observer interface {
Observe(float64)
}
// The ObserverFunc type is an adapter to allow the use of ordinary
// functions as Observers. If f is a function with the appropriate
// signature, ObserverFunc(f) is an Observer that calls f.
//
// This adapter is usually used in connection with the Timer type, and there are
// two general use cases:
//
// The most common one is to use a Gauge as the Observer for a Timer.
// See the "Gauge" Timer example.
//
// The more advanced use case is to create a function that dynamically decides
// which Observer to use for observing the duration. See the "Complex" Timer
// example.
type ObserverFunc func(float64)
// Observe calls f(value). It implements Observer.
func (f ObserverFunc) Observe(value float64) {
f(value)
}
// ObserverVec is an interface implemented by `HistogramVec` and `SummaryVec`.
type ObserverVec interface {
GetMetricWith(Labels) (Observer, error)
GetMetricWithLabelValues(lvs ...string) (Observer, error)
With(Labels) Observer
WithLabelValues(...string) Observer
CurryWith(Labels) (ObserverVec, error)
MustCurryWith(Labels) ObserverVec
Collector
}
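A short in-package sketch of the "Gauge as Observer" use case described above, assuming the Timer type from this package; the metric name is invented:

func sketchGaugeTimer() {
    lastRun := NewGauge(GaugeOpts{
        Name: "example_last_run_duration_seconds",
        Help: "Duration of the most recent run.",
    })
    timer := NewTimer(ObserverFunc(lastRun.Set))
    defer timer.ObserveDuration()
    // ... the work being timed ...
}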

@ -13,89 +13,139 @@
package prometheus package prometheus
import "github.com/prometheus/procfs" import (
"errors"
"os"
"github.com/prometheus/procfs"
)
type processCollector struct { type processCollector struct {
pid int
collectFn func(chan<- Metric) collectFn func(chan<- Metric)
pidFn func() (int, error) pidFn func() (int, error)
cpuTotal Counter
openFDs, maxFDs Gauge
vsize, rss Gauge
startTime Gauge
reportErrors bool
cpuTotal *Desc
openFDs, maxFDs *Desc
vsize, maxVsize *Desc
rss *Desc
startTime *Desc
} }
// NewProcessCollector returns a collector which exports the current state of
// process metrics including cpu, memory and file descriptor usage as well as
// the process start time for the given process id under the given namespace.
func NewProcessCollector(pid int, namespace string) Collector {
    return NewProcessCollectorPIDFn(
        func() (int, error) { return pid, nil },
        namespace,
    )
}
// ProcessCollectorOpts defines the behavior of a process metrics collector
// created with NewProcessCollector.
type ProcessCollectorOpts struct {
    // PidFn returns the PID of the process the collector collects metrics
    // for. It is called upon each collection. By default, the PID of the
    // current process is used, as determined on construction time by
    // calling os.Getpid().
    PidFn func() (int, error)
    // If non-empty, each of the collected metrics is prefixed by the
    // provided string and an underscore ("_").
    Namespace string
    // If true, any error encountered during collection is reported as an
    // invalid metric (see NewInvalidMetric). Otherwise, errors are ignored
    // and the collected metrics will be incomplete. (Possibly, no metrics
    // will be collected at all.) While that's usually not desired, it is
    // appropriate for the common "mix-in" of process metrics, where process
    // metrics are nice to have, but failing to collect them should not
    // disrupt the collection of the remaining metrics.
    ReportErrors bool
}
// NewProcessCollectorPIDFn returns a collector which exports the current state
// of process metrics including cpu, memory and file descriptor usage as well
// as the process start time under the given namespace. The given pidFn is
// called on each collect and is used to determine the process to export
// metrics for.
func NewProcessCollectorPIDFn(
    pidFn func() (int, error),
    namespace string,
) Collector {
    c := processCollector{
        pidFn:     pidFn,
        collectFn: func(chan<- Metric) {},
        cpuTotal: NewCounter(CounterOpts{
            Namespace: namespace,
            Name:      "process_cpu_seconds_total",
            Help:      "Total user and system CPU time spent in seconds.",
        }),
        openFDs: NewGauge(GaugeOpts{
            Namespace: namespace,
            Name:      "process_open_fds",
            Help:      "Number of open file descriptors.",
        }),
        maxFDs: NewGauge(GaugeOpts{
            Namespace: namespace,
            Name:      "process_max_fds",
            Help:      "Maximum number of open file descriptors.",
        }),
        vsize: NewGauge(GaugeOpts{
            Namespace: namespace,
            Name:      "process_virtual_memory_bytes",
            Help:      "Virtual memory size in bytes.",
        }),
        rss: NewGauge(GaugeOpts{
            Namespace: namespace,
            Name:      "process_resident_memory_bytes",
            Help:      "Resident memory size in bytes.",
        }),
        startTime: NewGauge(GaugeOpts{
            Namespace: namespace,
            Name:      "process_start_time_seconds",
            Help:      "Start time of the process since unix epoch in seconds.",
        }),
    }
// NewProcessCollector returns a collector which exports the current state of
// process metrics including CPU, memory and file descriptor usage as well as
// the process start time. The detailed behavior is defined by the provided
// ProcessCollectorOpts. The zero value of ProcessCollectorOpts creates a
// collector for the current process with an empty namespace string and no error
// reporting.
//
// Currently, the collector depends on a Linux-style proc filesystem and
// therefore only exports metrics for Linux.
//
// Note: An older version of this function had the following signature:
//
//     NewProcessCollector(pid int, namespace string) Collector
//
// Most commonly, it was called as
//
//     NewProcessCollector(os.Getpid(), "")
//
// The following call of the current version is equivalent to the above:
//
//     NewProcessCollector(ProcessCollectorOpts{})
func NewProcessCollector(opts ProcessCollectorOpts) Collector {
    ns := ""
    if len(opts.Namespace) > 0 {
        ns = opts.Namespace + "_"
    }

    c := &processCollector{
        reportErrors: opts.ReportErrors,
        cpuTotal: NewDesc(
            ns+"process_cpu_seconds_total",
            "Total user and system CPU time spent in seconds.",
            nil, nil,
        ),
        openFDs: NewDesc(
            ns+"process_open_fds",
            "Number of open file descriptors.",
            nil, nil,
        ),
        maxFDs: NewDesc(
            ns+"process_max_fds",
            "Maximum number of open file descriptors.",
            nil, nil,
        ),
        vsize: NewDesc(
            ns+"process_virtual_memory_bytes",
            "Virtual memory size in bytes.",
            nil, nil,
        ),
        maxVsize: NewDesc(
            ns+"process_virtual_memory_max_bytes",
            "Maximum amount of virtual memory available in bytes.",
            nil, nil,
        ),
        rss: NewDesc(
            ns+"process_resident_memory_bytes",
            "Resident memory size in bytes.",
            nil, nil,
        ),
        startTime: NewDesc(
            ns+"process_start_time_seconds",
            "Start time of the process since unix epoch in seconds.",
            nil, nil,
        ),
    }

    if opts.PidFn == nil {
        pid := os.Getpid()
        c.pidFn = func() (int, error) { return pid, nil }
    } else {
        c.pidFn = opts.PidFn
    }
// Set up process metric collection if supported by the runtime. // Set up process metric collection if supported by the runtime.
if _, err := procfs.NewStat(); err == nil { if _, err := procfs.NewStat(); err == nil {
c.collectFn = c.processCollect c.collectFn = c.processCollect
} else {
c.collectFn = func(ch chan<- Metric) {
c.reportError(ch, nil, errors.New("process metrics not supported on this platform"))
}
} }
return &c
return c
} }
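A brief sketch of using the new constructor with a dedicated registry, as it would appear inside this package; the namespace is invented for illustration:

func sketchRegisterProcessCollector() *Registry {
    reg := NewRegistry()
    reg.MustRegister(NewProcessCollector(ProcessCollectorOpts{
        Namespace:    "myapp", // metrics are exported as myapp_process_*
        ReportErrors: true,    // surface collection problems as invalid metrics
    }))
    return reg
}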
// Describe returns all descriptions of the collector. // Describe returns all descriptions of the collector.
func (c *processCollector) Describe(ch chan<- *Desc) { func (c *processCollector) Describe(ch chan<- *Desc) {
ch <- c.cpuTotal.Desc()
ch <- c.openFDs.Desc()
ch <- c.maxFDs.Desc()
ch <- c.vsize.Desc()
ch <- c.rss.Desc()
ch <- c.startTime.Desc()
ch <- c.cpuTotal
ch <- c.openFDs
ch <- c.maxFDs
ch <- c.vsize
ch <- c.maxVsize
ch <- c.rss
ch <- c.startTime
} }
// Collect returns the current state of all metrics of the collector. // Collect returns the current state of all metrics of the collector.
@ -103,40 +153,52 @@ func (c *processCollector) Collect(ch chan<- Metric) {
c.collectFn(ch) c.collectFn(ch)
} }
// TODO(ts): Bring back error reporting by reverting 7faf9e7 as soon as the
// client allows users to configure the error behavior.
func (c *processCollector) processCollect(ch chan<- Metric) { func (c *processCollector) processCollect(ch chan<- Metric) {
pid, err := c.pidFn() pid, err := c.pidFn()
if err != nil { if err != nil {
c.reportError(ch, nil, err)
return return
} }
p, err := procfs.NewProc(pid) p, err := procfs.NewProc(pid)
if err != nil { if err != nil {
c.reportError(ch, nil, err)
return return
} }
if stat, err := p.NewStat(); err == nil {
    c.cpuTotal.Set(stat.CPUTime())
    ch <- c.cpuTotal
    c.vsize.Set(float64(stat.VirtualMemory()))
    ch <- c.vsize
    c.rss.Set(float64(stat.ResidentMemory()))
    ch <- c.rss
    if startTime, err := stat.StartTime(); err == nil {
        c.startTime.Set(startTime)
        ch <- c.startTime
    }
}
if fds, err := p.FileDescriptorsLen(); err == nil {
    c.openFDs.Set(float64(fds))
    ch <- c.openFDs
}
if limits, err := p.NewLimits(); err == nil {
    c.maxFDs.Set(float64(limits.OpenFiles))
    ch <- c.maxFDs
}
}
if stat, err := p.NewStat(); err == nil {
    ch <- MustNewConstMetric(c.cpuTotal, CounterValue, stat.CPUTime())
    ch <- MustNewConstMetric(c.vsize, GaugeValue, float64(stat.VirtualMemory()))
    ch <- MustNewConstMetric(c.rss, GaugeValue, float64(stat.ResidentMemory()))
    if startTime, err := stat.StartTime(); err == nil {
        ch <- MustNewConstMetric(c.startTime, GaugeValue, startTime)
    } else {
        c.reportError(ch, c.startTime, err)
    }
} else {
    c.reportError(ch, nil, err)
}
if fds, err := p.FileDescriptorsLen(); err == nil {
    ch <- MustNewConstMetric(c.openFDs, GaugeValue, float64(fds))
} else {
    c.reportError(ch, c.openFDs, err)
}
if limits, err := p.NewLimits(); err == nil {
    ch <- MustNewConstMetric(c.maxFDs, GaugeValue, float64(limits.OpenFiles))
    ch <- MustNewConstMetric(c.maxVsize, GaugeValue, float64(limits.AddressSpace))
} else {
    c.reportError(ch, nil, err)
}
}
func (c *processCollector) reportError(ch chan<- Metric, desc *Desc, err error) {
if !c.reportErrors {
return
}
if desc == nil {
desc = NewInvalidDesc(err)
} }
ch <- NewInvalidMetric(desc, err)
} }

@ -1,13 +1,31 @@
// Copyright 2018 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// +build linux
package prometheus package prometheus
import ( import (
"bytes" "bytes"
"errors"
"os" "os"
"regexp" "regexp"
"testing" "testing"
"github.com/prometheus/common/expfmt" "github.com/prometheus/common/expfmt"
"github.com/prometheus/procfs" "github.com/prometheus/procfs"
dto "github.com/prometheus/client_model/go"
) )
func TestProcessCollector(t *testing.T) { func TestProcessCollector(t *testing.T) {
@ -16,12 +34,14 @@ func TestProcessCollector(t *testing.T) {
} }
registry := NewRegistry() registry := NewRegistry()
if err := registry.Register(NewProcessCollector(os.Getpid(), "")); err != nil {
if err := registry.Register(NewProcessCollector(ProcessCollectorOpts{})); err != nil {
t.Fatal(err) t.Fatal(err)
} }
if err := registry.Register(NewProcessCollectorPIDFn(
func() (int, error) { return os.Getpid(), nil }, "foobar"),
); err != nil {
if err := registry.Register(NewProcessCollector(ProcessCollectorOpts{
PidFn: func() (int, error) { return os.Getpid(), nil },
Namespace: "foobar",
ReportErrors: true, // No errors expected, just to see if none are reported.
})); err != nil {
t.Fatal(err) t.Fatal(err)
} }
@ -38,21 +58,46 @@ func TestProcessCollector(t *testing.T) {
} }
for _, re := range []*regexp.Regexp{ for _, re := range []*regexp.Regexp{
regexp.MustCompile("process_cpu_seconds_total [0-9]"), regexp.MustCompile("\nprocess_cpu_seconds_total [0-9]"),
regexp.MustCompile("process_max_fds [1-9]"), regexp.MustCompile("\nprocess_max_fds [1-9]"),
regexp.MustCompile("process_open_fds [1-9]"), regexp.MustCompile("\nprocess_open_fds [1-9]"),
regexp.MustCompile("process_virtual_memory_bytes [1-9]"), regexp.MustCompile("\nprocess_virtual_memory_max_bytes (-1|[1-9])"),
regexp.MustCompile("process_resident_memory_bytes [1-9]"), regexp.MustCompile("\nprocess_virtual_memory_bytes [1-9]"),
regexp.MustCompile("process_start_time_seconds [0-9.]{10,}"), regexp.MustCompile("\nprocess_resident_memory_bytes [1-9]"),
regexp.MustCompile("foobar_process_cpu_seconds_total [0-9]"), regexp.MustCompile("\nprocess_start_time_seconds [0-9.]{10,}"),
regexp.MustCompile("foobar_process_max_fds [1-9]"), regexp.MustCompile("\nfoobar_process_cpu_seconds_total [0-9]"),
regexp.MustCompile("foobar_process_open_fds [1-9]"), regexp.MustCompile("\nfoobar_process_max_fds [1-9]"),
regexp.MustCompile("foobar_process_virtual_memory_bytes [1-9]"), regexp.MustCompile("\nfoobar_process_open_fds [1-9]"),
regexp.MustCompile("foobar_process_resident_memory_bytes [1-9]"), regexp.MustCompile("\nfoobar_process_virtual_memory_max_bytes (-1|[1-9])"),
regexp.MustCompile("foobar_process_start_time_seconds [0-9.]{10,}"), regexp.MustCompile("\nfoobar_process_virtual_memory_bytes [1-9]"),
regexp.MustCompile("\nfoobar_process_resident_memory_bytes [1-9]"),
regexp.MustCompile("\nfoobar_process_start_time_seconds [0-9.]{10,}"),
} { } {
if !re.Match(buf.Bytes()) { if !re.Match(buf.Bytes()) {
t.Errorf("want body to match %s\n%s", re, buf.String()) t.Errorf("want body to match %s\n%s", re, buf.String())
} }
} }
brokenProcessCollector := NewProcessCollector(ProcessCollectorOpts{
PidFn: func() (int, error) { return 0, errors.New("boo") },
ReportErrors: true,
})
ch := make(chan Metric)
go func() {
brokenProcessCollector.Collect(ch)
close(ch)
}()
n := 0
for m := range ch {
n++
pb := &dto.Metric{}
err := m.Write(pb)
if err == nil {
t.Error("metric collected from broken process collector is unexpectedly valid")
}
}
if n != 1 {
t.Errorf("%d metrics collected, want 1", n)
}
} }

@ -0,0 +1,223 @@
// Copyright 2018 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package promauto provides constructors for the usual Prometheus metrics that
// return them already registered with the global registry
// (prometheus.DefaultRegisterer). This allows very compact code, avoiding any
// references to the registry altogether, but all the constructors in this
// package will panic if the registration fails.
//
// The following example is a complete program to create a histogram of normally
// distributed random numbers from the math/rand package:
//
// package main
//
// import (
// "math/rand"
// "net/http"
//
// "github.com/prometheus/client_golang/prometheus"
// "github.com/prometheus/client_golang/prometheus/promauto"
// "github.com/prometheus/client_golang/prometheus/promhttp"
// )
//
// var histogram = promauto.NewHistogram(prometheus.HistogramOpts{
// Name: "random_numbers",
// Help: "A histogram of normally distributed random numbers.",
// Buckets: prometheus.LinearBuckets(-3, .1, 61),
// })
//
// func Random() {
// for {
// histogram.Observe(rand.NormFloat64())
// }
// }
//
// func main() {
// go Random()
// http.Handle("/metrics", promhttp.Handler())
// http.ListenAndServe(":1971", nil)
// }
//
// Prometheus's version of a minimal hello-world program:
//
// package main
//
// import (
// "fmt"
// "net/http"
//
// "github.com/prometheus/client_golang/prometheus"
// "github.com/prometheus/client_golang/prometheus/promauto"
// "github.com/prometheus/client_golang/prometheus/promhttp"
// )
//
// func main() {
// http.Handle("/", promhttp.InstrumentHandlerCounter(
// promauto.NewCounterVec(
// prometheus.CounterOpts{
// Name: "hello_requests_total",
// Help: "Total number of hello-world requests by HTTP code.",
// },
// []string{"code"},
// ),
// http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
// fmt.Fprint(w, "Hello, world!")
// }),
// ))
// http.Handle("/metrics", promhttp.Handler())
// http.ListenAndServe(":1971", nil)
// }
//
// This appears very handy. So why are these constructors locked away in a
// separate package? There are two caveats:
//
// First, in more complex programs, global state is often quite problematic.
// That's the reason why the metrics constructors in the prometheus package do
// not interact with the global prometheus.DefaultRegisterer on their own. You
// are free to use the Register or MustRegister functions to register them with
// the global prometheus.DefaultRegisterer, but you could as well choose a local
// Registerer (usually created with prometheus.NewRegistry, but there are other
// scenarios, e.g. testing).
//
// The second issue is that registration may fail, e.g. if a metric inconsistent
// with the newly to be registered one is already registered. But how to signal
// and handle a panic in the automatic registration with the default registry?
// The only way is panicking. While panicking on invalid input provided by the
// programmer is certainly fine, things are a bit more subtle in this case: You
// might just add another package to the program, and that package (in its init
// function) happens to register a metric with the same name as your code. Now,
// all of a sudden, either your code or the code of the newly imported package
// panics, depending on initialization order, without any opportunity to handle
// the case gracefully. Even worse is a scenario where registration happens
// later during the runtime (e.g. upon loading some kind of plugin), where the
// panic could be triggered long after the code has been deployed to
// production. A possibility to panic should be explicitly called out by the
// Must… idiom, cf. prometheus.MustRegister. But adding a separate set of
// constructors in the prometheus package called MustRegisterNewCounterVec or
// similar would be quite unwieldy. Adding an extra MustRegister method to each
// metric, returning the registered metric, would result in nice code for those
// using the method, but would pollute every single metric interface for
// everybody avoiding the global registry.
//
// To address both issues, the problematic auto-registering and possibly
// panicking constructors are all in this package with a clear warning
// ahead. And whoever cares about avoiding global state and possibly panicking
// function calls can simply ignore the existence of the promauto package
// altogether.
//
// A final note: There is a similar case in the net/http package of the standard
// library. It has DefaultServeMux as a global instance of ServeMux, and the
// Handle function acts on it, panicking if a handler for the same pattern has
// already been registered. However, one might argue that the whole HTTP routing
// is usually set up closely together in the same package or file, while
// Prometheus metrics tend to be spread widely over the codebase, increasing the
// chance of surprising registration failures. Furthermore, the use of global
// state in net/http has been criticized widely, and some avoid it altogether.
package promauto
import "github.com/prometheus/client_golang/prometheus"
// NewCounter works like the function of the same name in the prometheus package
// but it automatically registers the Counter with the
// prometheus.DefaultRegisterer. If the registration fails, NewCounter panics.
func NewCounter(opts prometheus.CounterOpts) prometheus.Counter {
c := prometheus.NewCounter(opts)
prometheus.MustRegister(c)
return c
}
// NewCounterVec works like the function of the same name in the prometheus
// package but it automatically registers the CounterVec with the
// prometheus.DefaultRegisterer. If the registration fails, NewCounterVec
// panics.
func NewCounterVec(opts prometheus.CounterOpts, labelNames []string) *prometheus.CounterVec {
c := prometheus.NewCounterVec(opts, labelNames)
prometheus.MustRegister(c)
return c
}
// NewCounterFunc works like the function of the same name in the prometheus
// package but it automatically registers the CounterFunc with the
// prometheus.DefaultRegisterer. If the registration fails, NewCounterFunc
// panics.
func NewCounterFunc(opts prometheus.CounterOpts, function func() float64) prometheus.CounterFunc {
g := prometheus.NewCounterFunc(opts, function)
prometheus.MustRegister(g)
return g
}
// NewGauge works like the function of the same name in the prometheus package
// but it automatically registers the Gauge with the
// prometheus.DefaultRegisterer. If the registration fails, NewGauge panics.
func NewGauge(opts prometheus.GaugeOpts) prometheus.Gauge {
g := prometheus.NewGauge(opts)
prometheus.MustRegister(g)
return g
}
// NewGaugeVec works like the function of the same name in the prometheus
// package but it automatically registers the GaugeVec with the
// prometheus.DefaultRegisterer. If the registration fails, NewGaugeVec panics.
func NewGaugeVec(opts prometheus.GaugeOpts, labelNames []string) *prometheus.GaugeVec {
g := prometheus.NewGaugeVec(opts, labelNames)
prometheus.MustRegister(g)
return g
}
// NewGaugeFunc works like the function of the same name in the prometheus
// package but it automatically registers the GaugeFunc with the
// prometheus.DefaultRegisterer. If the registration fails, NewGaugeFunc panics.
func NewGaugeFunc(opts prometheus.GaugeOpts, function func() float64) prometheus.GaugeFunc {
g := prometheus.NewGaugeFunc(opts, function)
prometheus.MustRegister(g)
return g
}
// NewSummary works like the function of the same name in the prometheus package
// but it automatically registers the Summary with the
// prometheus.DefaultRegisterer. If the registration fails, NewSummary panics.
func NewSummary(opts prometheus.SummaryOpts) prometheus.Summary {
s := prometheus.NewSummary(opts)
prometheus.MustRegister(s)
return s
}
// NewSummaryVec works like the function of the same name in the prometheus
// package but it automatically registers the SummaryVec with the
// prometheus.DefaultRegisterer. If the registration fails, NewSummaryVec
// panics.
func NewSummaryVec(opts prometheus.SummaryOpts, labelNames []string) *prometheus.SummaryVec {
s := prometheus.NewSummaryVec(opts, labelNames)
prometheus.MustRegister(s)
return s
}
// NewHistogram works like the function of the same name in the prometheus
// package but it automatically registers the Histogram with the
// prometheus.DefaultRegisterer. If the registration fails, NewHistogram panics.
func NewHistogram(opts prometheus.HistogramOpts) prometheus.Histogram {
h := prometheus.NewHistogram(opts)
prometheus.MustRegister(h)
return h
}
// NewHistogramVec works like the function of the same name in the prometheus
// package but it automatically registers the HistogramVec with the
// prometheus.DefaultRegisterer. If the registration fails, NewHistogramVec
// panics.
func NewHistogramVec(opts prometheus.HistogramOpts, labelNames []string) *prometheus.HistogramVec {
h := prometheus.NewHistogramVec(opts, labelNames)
prometheus.MustRegister(h)
return h
}
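A short sketch of how these constructors are typically used from application code (not part of the diff; the package, metric, and label names are invented): declaring a metric as a package-level variable registers it with prometheus.DefaultRegisterer as a side effect of initialization.

package worker

import (
    "github.com/prometheus/client_golang/prometheus"
    "github.com/prometheus/client_golang/prometheus/promauto"
)

// jobsProcessed is registered with the default registry during package init;
// a registration conflict would panic, as documented above.
var jobsProcessed = promauto.NewCounterVec(prometheus.CounterOpts{
    Name: "worker_jobs_processed_total",
    Help: "Number of processed jobs, partitioned by outcome.",
}, []string{"outcome"})

func process() {
    // ... do the work ...
    jobsProcessed.WithLabelValues("success").Inc()
}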

@ -0,0 +1,199 @@
// Copyright 2017 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package promhttp
import (
"bufio"
"io"
"net"
"net/http"
)
const (
closeNotifier = 1 << iota
flusher
hijacker
readerFrom
pusher
)
type delegator interface {
http.ResponseWriter
Status() int
Written() int64
}
type responseWriterDelegator struct {
http.ResponseWriter
handler, method string
status int
written int64
wroteHeader bool
observeWriteHeader func(int)
}
func (r *responseWriterDelegator) Status() int {
return r.status
}
func (r *responseWriterDelegator) Written() int64 {
return r.written
}
func (r *responseWriterDelegator) WriteHeader(code int) {
r.status = code
r.wroteHeader = true
r.ResponseWriter.WriteHeader(code)
if r.observeWriteHeader != nil {
r.observeWriteHeader(code)
}
}
func (r *responseWriterDelegator) Write(b []byte) (int, error) {
if !r.wroteHeader {
r.WriteHeader(http.StatusOK)
}
n, err := r.ResponseWriter.Write(b)
r.written += int64(n)
return n, err
}
type closeNotifierDelegator struct{ *responseWriterDelegator }
type flusherDelegator struct{ *responseWriterDelegator }
type hijackerDelegator struct{ *responseWriterDelegator }
type readerFromDelegator struct{ *responseWriterDelegator }
func (d closeNotifierDelegator) CloseNotify() <-chan bool {
return d.ResponseWriter.(http.CloseNotifier).CloseNotify()
}
func (d flusherDelegator) Flush() {
d.ResponseWriter.(http.Flusher).Flush()
}
func (d hijackerDelegator) Hijack() (net.Conn, *bufio.ReadWriter, error) {
return d.ResponseWriter.(http.Hijacker).Hijack()
}
func (d readerFromDelegator) ReadFrom(re io.Reader) (int64, error) {
if !d.wroteHeader {
d.WriteHeader(http.StatusOK)
}
n, err := d.ResponseWriter.(io.ReaderFrom).ReadFrom(re)
d.written += n
return n, err
}
var pickDelegator = make([]func(*responseWriterDelegator) delegator, 32)
func init() {
// TODO(beorn7): Code generation would help here.
pickDelegator[0] = func(d *responseWriterDelegator) delegator { // 0
return d
}
pickDelegator[closeNotifier] = func(d *responseWriterDelegator) delegator { // 1
return closeNotifierDelegator{d}
}
pickDelegator[flusher] = func(d *responseWriterDelegator) delegator { // 2
return flusherDelegator{d}
}
pickDelegator[flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 3
return struct {
*responseWriterDelegator
http.Flusher
http.CloseNotifier
}{d, flusherDelegator{d}, closeNotifierDelegator{d}}
}
pickDelegator[hijacker] = func(d *responseWriterDelegator) delegator { // 4
return hijackerDelegator{d}
}
pickDelegator[hijacker+closeNotifier] = func(d *responseWriterDelegator) delegator { // 5
return struct {
*responseWriterDelegator
http.Hijacker
http.CloseNotifier
}{d, hijackerDelegator{d}, closeNotifierDelegator{d}}
}
pickDelegator[hijacker+flusher] = func(d *responseWriterDelegator) delegator { // 6
return struct {
*responseWriterDelegator
http.Hijacker
http.Flusher
}{d, hijackerDelegator{d}, flusherDelegator{d}}
}
pickDelegator[hijacker+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 7
return struct {
*responseWriterDelegator
http.Hijacker
http.Flusher
http.CloseNotifier
}{d, hijackerDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}}
}
pickDelegator[readerFrom] = func(d *responseWriterDelegator) delegator { // 8
return readerFromDelegator{d}
}
pickDelegator[readerFrom+closeNotifier] = func(d *responseWriterDelegator) delegator { // 9
return struct {
*responseWriterDelegator
io.ReaderFrom
http.CloseNotifier
}{d, readerFromDelegator{d}, closeNotifierDelegator{d}}
}
pickDelegator[readerFrom+flusher] = func(d *responseWriterDelegator) delegator { // 10
return struct {
*responseWriterDelegator
io.ReaderFrom
http.Flusher
}{d, readerFromDelegator{d}, flusherDelegator{d}}
}
pickDelegator[readerFrom+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 11
return struct {
*responseWriterDelegator
io.ReaderFrom
http.Flusher
http.CloseNotifier
}{d, readerFromDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}}
}
pickDelegator[readerFrom+hijacker] = func(d *responseWriterDelegator) delegator { // 12
return struct {
*responseWriterDelegator
io.ReaderFrom
http.Hijacker
}{d, readerFromDelegator{d}, hijackerDelegator{d}}
}
pickDelegator[readerFrom+hijacker+closeNotifier] = func(d *responseWriterDelegator) delegator { // 13
return struct {
*responseWriterDelegator
io.ReaderFrom
http.Hijacker
http.CloseNotifier
}{d, readerFromDelegator{d}, hijackerDelegator{d}, closeNotifierDelegator{d}}
}
pickDelegator[readerFrom+hijacker+flusher] = func(d *responseWriterDelegator) delegator { // 14
return struct {
*responseWriterDelegator
io.ReaderFrom
http.Hijacker
http.Flusher
}{d, readerFromDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}}
}
pickDelegator[readerFrom+hijacker+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 15
return struct {
*responseWriterDelegator
io.ReaderFrom
http.Hijacker
http.Flusher
http.CloseNotifier
}{d, readerFromDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}}
}
}

@ -0,0 +1,181 @@
// Copyright 2017 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// +build go1.8
package promhttp
import (
"io"
"net/http"
)
type pusherDelegator struct{ *responseWriterDelegator }
func (d pusherDelegator) Push(target string, opts *http.PushOptions) error {
return d.ResponseWriter.(http.Pusher).Push(target, opts)
}
func init() {
pickDelegator[pusher] = func(d *responseWriterDelegator) delegator { // 16
return pusherDelegator{d}
}
pickDelegator[pusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 17
return struct {
*responseWriterDelegator
http.Pusher
http.CloseNotifier
}{d, pusherDelegator{d}, closeNotifierDelegator{d}}
}
pickDelegator[pusher+flusher] = func(d *responseWriterDelegator) delegator { // 18
return struct {
*responseWriterDelegator
http.Pusher
http.Flusher
}{d, pusherDelegator{d}, flusherDelegator{d}}
}
pickDelegator[pusher+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 19
return struct {
*responseWriterDelegator
http.Pusher
http.Flusher
http.CloseNotifier
}{d, pusherDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}}
}
pickDelegator[pusher+hijacker] = func(d *responseWriterDelegator) delegator { // 20
return struct {
*responseWriterDelegator
http.Pusher
http.Hijacker
}{d, pusherDelegator{d}, hijackerDelegator{d}}
}
pickDelegator[pusher+hijacker+closeNotifier] = func(d *responseWriterDelegator) delegator { // 21
return struct {
*responseWriterDelegator
http.Pusher
http.Hijacker
http.CloseNotifier
}{d, pusherDelegator{d}, hijackerDelegator{d}, closeNotifierDelegator{d}}
}
pickDelegator[pusher+hijacker+flusher] = func(d *responseWriterDelegator) delegator { // 22
return struct {
*responseWriterDelegator
http.Pusher
http.Hijacker
http.Flusher
}{d, pusherDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}}
}
pickDelegator[pusher+hijacker+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { //23
return struct {
*responseWriterDelegator
http.Pusher
http.Hijacker
http.Flusher
http.CloseNotifier
}{d, pusherDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}}
}
pickDelegator[pusher+readerFrom] = func(d *responseWriterDelegator) delegator { // 24
return struct {
*responseWriterDelegator
http.Pusher
io.ReaderFrom
}{d, pusherDelegator{d}, readerFromDelegator{d}}
}
pickDelegator[pusher+readerFrom+closeNotifier] = func(d *responseWriterDelegator) delegator { // 25
return struct {
*responseWriterDelegator
http.Pusher
io.ReaderFrom
http.CloseNotifier
}{d, pusherDelegator{d}, readerFromDelegator{d}, closeNotifierDelegator{d}}
}
pickDelegator[pusher+readerFrom+flusher] = func(d *responseWriterDelegator) delegator { // 26
return struct {
*responseWriterDelegator
http.Pusher
io.ReaderFrom
http.Flusher
}{d, pusherDelegator{d}, readerFromDelegator{d}, flusherDelegator{d}}
}
pickDelegator[pusher+readerFrom+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 27
return struct {
*responseWriterDelegator
http.Pusher
io.ReaderFrom
http.Flusher
http.CloseNotifier
}{d, pusherDelegator{d}, readerFromDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}}
}
pickDelegator[pusher+readerFrom+hijacker] = func(d *responseWriterDelegator) delegator { // 28
return struct {
*responseWriterDelegator
http.Pusher
io.ReaderFrom
http.Hijacker
}{d, pusherDelegator{d}, readerFromDelegator{d}, hijackerDelegator{d}}
}
pickDelegator[pusher+readerFrom+hijacker+closeNotifier] = func(d *responseWriterDelegator) delegator { // 29
return struct {
*responseWriterDelegator
http.Pusher
io.ReaderFrom
http.Hijacker
http.CloseNotifier
}{d, pusherDelegator{d}, readerFromDelegator{d}, hijackerDelegator{d}, closeNotifierDelegator{d}}
}
pickDelegator[pusher+readerFrom+hijacker+flusher] = func(d *responseWriterDelegator) delegator { // 30
return struct {
*responseWriterDelegator
http.Pusher
io.ReaderFrom
http.Hijacker
http.Flusher
}{d, pusherDelegator{d}, readerFromDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}}
}
pickDelegator[pusher+readerFrom+hijacker+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 31
return struct {
*responseWriterDelegator
http.Pusher
io.ReaderFrom
http.Hijacker
http.Flusher
http.CloseNotifier
}{d, pusherDelegator{d}, readerFromDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}}
}
}
func newDelegator(w http.ResponseWriter, observeWriteHeaderFunc func(int)) delegator {
d := &responseWriterDelegator{
ResponseWriter: w,
observeWriteHeader: observeWriteHeaderFunc,
}
id := 0
if _, ok := w.(http.CloseNotifier); ok {
id += closeNotifier
}
if _, ok := w.(http.Flusher); ok {
id += flusher
}
if _, ok := w.(http.Hijacker); ok {
id += hijacker
}
if _, ok := w.(io.ReaderFrom); ok {
id += readerFrom
}
if _, ok := w.(http.Pusher); ok {
id += pusher
}
return pickDelegator[id](d)
}
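To illustrate the bitmask lookup, here is a rough in-package sketch with a hypothetical ResponseWriter wrapper; for a writer that also implements http.Flusher and http.CloseNotifier the id works out to closeNotifier+flusher = 3, so pickDelegator[3] is used.

// flushNotifyWriter is a hypothetical ResponseWriter that also implements
// http.Flusher and http.CloseNotifier.
type flushNotifyWriter struct{ http.ResponseWriter }

func (w flushNotifyWriter) Flush()                   {}
func (w flushNotifyWriter) CloseNotify() <-chan bool { return make(chan bool) }

func sketchDelegatorPick(w http.ResponseWriter) delegator {
    // newDelegator detects the two optional interfaces and returns a
    // delegator exposing exactly those on top of the plain ResponseWriter.
    return newDelegator(flushNotifyWriter{w}, nil)
}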

@ -0,0 +1,44 @@
// Copyright 2017 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// +build !go1.8
package promhttp
import (
"io"
"net/http"
)
func newDelegator(w http.ResponseWriter, observeWriteHeaderFunc func(int)) delegator {
d := &responseWriterDelegator{
ResponseWriter: w,
observeWriteHeader: observeWriteHeaderFunc,
}
id := 0
if _, ok := w.(http.CloseNotifier); ok {
id += closeNotifier
}
if _, ok := w.(http.Flusher); ok {
id += flusher
}
if _, ok := w.(http.Hijacker); ok {
id += hijacker
}
if _, ok := w.(io.ReaderFrom); ok {
id += readerFrom
}
return pickDelegator[id](d)
}

@ -11,21 +11,24 @@
// See the License for the specific language governing permissions and // See the License for the specific language governing permissions and
// limitations under the License. // limitations under the License.
// Copyright (c) 2013, The Prometheus Authors
// All rights reserved.
//
// Use of this source code is governed by a BSD-style license that can be found
// in the LICENSE file.
//
// Package promhttp contains functions to create http.Handler instances to
// expose Prometheus metrics via HTTP. In later versions of this package, it
// will also contain tooling to instrument instances of http.Handler and
// http.RoundTripper.
//
// promhttp.Handler acts on the prometheus.DefaultGatherer. With HandlerFor,
// you can create a handler for a custom registry or anything that implements
// the Gatherer interface. It also allows to create handlers that act
// differently on errors or allow to log errors.
// Package promhttp provides tooling around HTTP servers and clients.
//
// First, the package allows the creation of http.Handler instances to expose
// Prometheus metrics via HTTP. promhttp.Handler acts on the
// prometheus.DefaultGatherer. With HandlerFor, you can create a handler for a
// custom registry or anything that implements the Gatherer interface. It also
// allows the creation of handlers that act differently on errors or allow to
// log errors.
//
// Second, the package provides tooling to instrument instances of http.Handler
// via middleware. Middleware wrappers follow the naming scheme
// InstrumentHandlerX, where X describes the intended use of the middleware.
// See each function's doc comment for specific details.
//
// Finally, the package allows for an http.RoundTripper to be instrumented via
// middleware. Middleware wrappers follow the naming scheme
// InstrumentRoundTripperX, where X describes the intended use of the
// middleware. See each function's doc comment for specific details.
package promhttp package promhttp
import ( import (
@ -36,6 +39,7 @@ import (
"net/http" "net/http"
"strings" "strings"
"sync" "sync"
"time"
"github.com/prometheus/common/expfmt" "github.com/prometheus/common/expfmt"
@ -64,21 +68,51 @@ func giveBuf(buf *bytes.Buffer) {
bufPool.Put(buf) bufPool.Put(buf)
} }
// Handler returns an HTTP handler for the prometheus.DefaultGatherer. The // Handler returns an http.Handler for the prometheus.DefaultGatherer, using
// Handler uses the default HandlerOpts, i.e. report the first error as an HTTP // default HandlerOpts, i.e. it reports the first error as an HTTP error, it has
// error, no error logging, and compression if requested by the client. // no error logging, and it applies compression if requested by the client.
//
// The returned http.Handler is already instrumented using the
// InstrumentMetricHandler function and the prometheus.DefaultRegisterer. If you
// create multiple http.Handlers by separate calls of the Handler function, the
// metrics used for instrumentation will be shared between them, providing
// global scrape counts.
// //
// If you want to create a Handler for the DefaultGatherer with different // This function is meant to cover the bulk of basic use cases. If you are doing
// HandlerOpts, create it with HandlerFor with prometheus.DefaultGatherer and // anything that requires more customization (including using a non-default
// your desired HandlerOpts. // Gatherer, different instrumentation, and non-default HandlerOpts), use the
// HandlerFor function. See there for details.
func Handler() http.Handler { func Handler() http.Handler {
return HandlerFor(prometheus.DefaultGatherer, HandlerOpts{}) return InstrumentMetricHandler(
prometheus.DefaultRegisterer, HandlerFor(prometheus.DefaultGatherer, HandlerOpts{}),
)
} }
// HandlerFor returns an http.Handler for the provided Gatherer. The behavior // HandlerFor returns an uninstrumented http.Handler for the provided
// of the Handler is defined by the provided HandlerOpts. // Gatherer. The behavior of the Handler is defined by the provided
// HandlerOpts. Thus, HandlerFor is useful to create http.Handlers for custom
// Gatherers, with non-default HandlerOpts, and/or with custom (or no)
// instrumentation. Use the InstrumentMetricHandler function to apply the same
// kind of instrumentation as it is used by the Handler function.
func HandlerFor(reg prometheus.Gatherer, opts HandlerOpts) http.Handler { func HandlerFor(reg prometheus.Gatherer, opts HandlerOpts) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { var inFlightSem chan struct{}
if opts.MaxRequestsInFlight > 0 {
inFlightSem = make(chan struct{}, opts.MaxRequestsInFlight)
}
h := http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
if inFlightSem != nil {
select {
case inFlightSem <- struct{}{}: // All good, carry on.
defer func() { <-inFlightSem }()
default:
http.Error(w, fmt.Sprintf(
"Limit of concurrent requests reached (%d), try again later.", opts.MaxRequestsInFlight,
), http.StatusServiceUnavailable)
return
}
}
mfs, err := reg.Gather() mfs, err := reg.Gather()
if err != nil { if err != nil {
if opts.ErrorLog != nil { if opts.ErrorLog != nil {
@ -125,7 +159,7 @@ func HandlerFor(reg prometheus.Gatherer, opts HandlerOpts) http.Handler {
closer.Close() closer.Close()
} }
if lastErr != nil && buf.Len() == 0 { if lastErr != nil && buf.Len() == 0 {
http.Error(w, "No metrics encoded, last error:\n\n"+err.Error(), http.StatusInternalServerError) http.Error(w, "No metrics encoded, last error:\n\n"+lastErr.Error(), http.StatusInternalServerError)
return return
} }
header := w.Header() header := w.Header()
@ -134,9 +168,70 @@ func HandlerFor(reg prometheus.Gatherer, opts HandlerOpts) http.Handler {
if encoding != "" { if encoding != "" {
header.Set(contentEncodingHeader, encoding) header.Set(contentEncodingHeader, encoding)
} }
w.Write(buf.Bytes()) if _, err := w.Write(buf.Bytes()); err != nil && opts.ErrorLog != nil {
opts.ErrorLog.Println("error while sending encoded metrics:", err)
}
// TODO(beorn7): Consider streaming serving of metrics. // TODO(beorn7): Consider streaming serving of metrics.
}) })
if opts.Timeout <= 0 {
return h
}
return http.TimeoutHandler(h, opts.Timeout, fmt.Sprintf(
"Exceeded configured timeout of %v.\n",
opts.Timeout,
))
}
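Following the doc comments above, a minimal sketch of serving metrics from a custom registry while keeping the same instrumentation that Handler applies by default; the registry variable, path, and listen address are illustrative.

package main

import (
	"log"
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	// A custom registry instead of prometheus.DefaultGatherer.
	reg := prometheus.NewRegistry()

	// HandlerFor alone is uninstrumented; InstrumentMetricHandler adds the
	// promhttp_metric_handler_* metrics and registers them with reg.
	h := promhttp.InstrumentMetricHandler(
		reg, promhttp.HandlerFor(reg, promhttp.HandlerOpts{}),
	)

	http.Handle("/metrics", h)
	log.Fatal(http.ListenAndServe(":8080", nil))
}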
// InstrumentMetricHandler is usually used with an http.Handler returned by the
// HandlerFor function. It instruments the provided http.Handler with two
// metrics: A counter vector "promhttp_metric_handler_requests_total" to count
// scrapes partitioned by HTTP status code, and a gauge
// "promhttp_metric_handler_requests_in_flight" to track the number of
// simultaneous scrapes. This function idempotently registers collectors for
// both metrics with the provided Registerer. It panics if the registration
// fails. The provided metrics are useful to see how many scrapes hit the
// monitored target (which could be from different Prometheus servers or other
// scrapers), and how often they overlap (which would result in more than one
// scrape in flight at the same time). Note that the scrapes-in-flight gauge
// will contain the scrape by which it is exposed, while the scrape counter will
// only get incremented after the scrape is complete (as only then the status
// code is known). For tracking scrape durations, use the
// "scrape_duration_seconds" gauge created by the Prometheus server upon each
// scrape.
func InstrumentMetricHandler(reg prometheus.Registerer, handler http.Handler) http.Handler {
cnt := prometheus.NewCounterVec(
prometheus.CounterOpts{
Name: "promhttp_metric_handler_requests_total",
Help: "Total number of scrapes by HTTP status code.",
},
[]string{"code"},
)
// Initialize the most likely HTTP status codes.
cnt.WithLabelValues("200")
cnt.WithLabelValues("500")
cnt.WithLabelValues("503")
if err := reg.Register(cnt); err != nil {
if are, ok := err.(prometheus.AlreadyRegisteredError); ok {
cnt = are.ExistingCollector.(*prometheus.CounterVec)
} else {
panic(err)
}
}
gge := prometheus.NewGauge(prometheus.GaugeOpts{
Name: "promhttp_metric_handler_requests_in_flight",
Help: "Current number of scrapes being served.",
})
if err := reg.Register(gge); err != nil {
if are, ok := err.(prometheus.AlreadyRegisteredError); ok {
gge = are.ExistingCollector.(prometheus.Gauge)
} else {
panic(err)
}
}
return InstrumentHandlerCounter(cnt, InstrumentHandlerInFlight(gge, handler))
} }
// HandlerErrorHandling defines how a Handler serving metrics will handle // HandlerErrorHandling defines how a Handler serving metrics will handle
@ -180,6 +275,21 @@ type HandlerOpts struct {
// If DisableCompression is true, the handler will never compress the // If DisableCompression is true, the handler will never compress the
// response, even if requested by the client. // response, even if requested by the client.
DisableCompression bool DisableCompression bool
// The number of concurrent HTTP requests is limited to
// MaxRequestsInFlight. Additional requests are responded to with 503
// Service Unavailable and a suitable message in the body. If
// MaxRequestsInFlight is 0 or negative, no limit is applied.
MaxRequestsInFlight int
// If handling a request takes longer than Timeout, it is responded to
// with 503 Service Unavailable and a suitable message. No timeout is
// applied if Timeout is 0 or negative. Note that with the current
// implementation, reaching the timeout simply ends the HTTP request as
// described above (and even that only if sending of the body hasn't
// started yet), while the bulk work of gathering all the metrics keeps
// running in the background (with the eventual result to be thrown
// away). Until the implementation is improved, it is recommended to
// implement a separate timeout in potentially slow Collectors.
Timeout time.Duration
} }
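A short sketch of HandlerOpts with the two new limits set. The concrete values are illustrative, not recommendations.

package main

import (
	"log"
	"net/http"
	"time"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	reg := prometheus.NewRegistry()

	// Reject a third concurrent scrape with 503 and end responses that take
	// longer than five seconds, as described in the HandlerOpts comments.
	h := promhttp.HandlerFor(reg, promhttp.HandlerOpts{
		MaxRequestsInFlight: 2,
		Timeout:             5 * time.Second,
	})

	http.Handle("/metrics", h)
	log.Fatal(http.ListenAndServe(":8080", nil))
}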
// decorateWriter wraps a writer to handle gzip compression if requested. It // decorateWriter wraps a writer to handle gzip compression if requested. It
@ -192,7 +302,7 @@ func decorateWriter(request *http.Request, writer io.Writer, compressionDisabled
header := request.Header.Get(acceptEncodingHeader) header := request.Header.Get(acceptEncodingHeader)
parts := strings.Split(header, ",") parts := strings.Split(header, ",")
for _, part := range parts { for _, part := range parts {
part := strings.TrimSpace(part) part = strings.TrimSpace(part)
if part == "gzip" || strings.HasPrefix(part, "gzip;") { if part == "gzip" || strings.HasPrefix(part, "gzip;") {
return gzip.NewWriter(writer), "gzip" return gzip.NewWriter(writer), "gzip"
} }

@ -11,12 +11,6 @@
// See the License for the specific language governing permissions and // See the License for the specific language governing permissions and
// limitations under the License. // limitations under the License.
// Copyright (c) 2013, The Prometheus Authors
// All rights reserved.
//
// Use of this source code is governed by a BSD-style license that can be found
// in the LICENSE file.
package promhttp package promhttp
import ( import (
@ -25,7 +19,9 @@ import (
"log" "log"
"net/http" "net/http"
"net/http/httptest" "net/http/httptest"
"strings"
"testing" "testing"
"time"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
) )
@ -43,6 +39,23 @@ func (e errorCollector) Collect(ch chan<- prometheus.Metric) {
) )
} }
type blockingCollector struct {
CollectStarted, Block chan struct{}
}
func (b blockingCollector) Describe(ch chan<- *prometheus.Desc) {
ch <- prometheus.NewDesc("dummy_desc", "not helpful", nil, nil)
}
func (b blockingCollector) Collect(ch chan<- prometheus.Metric) {
select {
case b.CollectStarted <- struct{}{}:
default:
}
// Collects nothing, just waits for a channel receive.
<-b.Block
}
func TestHandlerErrorHandling(t *testing.T) { func TestHandlerErrorHandling(t *testing.T) {
// Create a registry that collects a MetricFamily with two elements, // Create a registry that collects a MetricFamily with two elements,
@ -108,7 +121,7 @@ the_count 0
t.Errorf("got HTTP status code %d, want %d", got, want) t.Errorf("got HTTP status code %d, want %d", got, want)
} }
if got := logBuf.String(); got != wantMsg { if got := logBuf.String(); got != wantMsg {
t.Errorf("got log message:\n%s\nwant log mesage:\n%s\n", got, wantMsg) t.Errorf("got log message:\n%s\nwant log message:\n%s\n", got, wantMsg)
} }
if got := writer.Body.String(); got != wantErrorBody { if got := writer.Body.String(); got != wantErrorBody {
t.Errorf("got body:\n%s\nwant body:\n%s\n", got, wantErrorBody) t.Errorf("got body:\n%s\nwant body:\n%s\n", got, wantErrorBody)
@ -135,3 +148,103 @@ the_count 0
}() }()
panicHandler.ServeHTTP(writer, request) panicHandler.ServeHTTP(writer, request)
} }
func TestInstrumentMetricHandler(t *testing.T) {
reg := prometheus.NewRegistry()
handler := InstrumentMetricHandler(reg, HandlerFor(reg, HandlerOpts{}))
// Do it again to test idempotency.
InstrumentMetricHandler(reg, HandlerFor(reg, HandlerOpts{}))
writer := httptest.NewRecorder()
request, _ := http.NewRequest("GET", "/", nil)
request.Header.Add("Accept", "test/plain")
handler.ServeHTTP(writer, request)
if got, want := writer.Code, http.StatusOK; got != want {
t.Errorf("got HTTP status code %d, want %d", got, want)
}
want := "promhttp_metric_handler_requests_in_flight 1\n"
if got := writer.Body.String(); !strings.Contains(got, want) {
t.Errorf("got body %q, does not contain %q", got, want)
}
want = "promhttp_metric_handler_requests_total{code=\"200\"} 0\n"
if got := writer.Body.String(); !strings.Contains(got, want) {
t.Errorf("got body %q, does not contain %q", got, want)
}
writer.Body.Reset()
handler.ServeHTTP(writer, request)
if got, want := writer.Code, http.StatusOK; got != want {
t.Errorf("got HTTP status code %d, want %d", got, want)
}
want = "promhttp_metric_handler_requests_in_flight 1\n"
if got := writer.Body.String(); !strings.Contains(got, want) {
t.Errorf("got body %q, does not contain %q", got, want)
}
want = "promhttp_metric_handler_requests_total{code=\"200\"} 1\n"
if got := writer.Body.String(); !strings.Contains(got, want) {
t.Errorf("got body %q, does not contain %q", got, want)
}
}
func TestHandlerMaxRequestsInFlight(t *testing.T) {
reg := prometheus.NewRegistry()
handler := HandlerFor(reg, HandlerOpts{MaxRequestsInFlight: 1})
w1 := httptest.NewRecorder()
w2 := httptest.NewRecorder()
w3 := httptest.NewRecorder()
request, _ := http.NewRequest("GET", "/", nil)
request.Header.Add("Accept", "test/plain")
c := blockingCollector{Block: make(chan struct{}), CollectStarted: make(chan struct{}, 1)}
reg.MustRegister(c)
rq1Done := make(chan struct{})
go func() {
handler.ServeHTTP(w1, request)
close(rq1Done)
}()
<-c.CollectStarted
handler.ServeHTTP(w2, request)
if got, want := w2.Code, http.StatusServiceUnavailable; got != want {
t.Errorf("got HTTP status code %d, want %d", got, want)
}
if got, want := w2.Body.String(), "Limit of concurrent requests reached (1), try again later.\n"; got != want {
t.Errorf("got body %q, want %q", got, want)
}
close(c.Block)
<-rq1Done
handler.ServeHTTP(w3, request)
if got, want := w3.Code, http.StatusOK; got != want {
t.Errorf("got HTTP status code %d, want %d", got, want)
}
}
func TestHandlerTimeout(t *testing.T) {
reg := prometheus.NewRegistry()
handler := HandlerFor(reg, HandlerOpts{Timeout: time.Millisecond})
w := httptest.NewRecorder()
request, _ := http.NewRequest("GET", "/", nil)
request.Header.Add("Accept", "test/plain")
c := blockingCollector{Block: make(chan struct{}), CollectStarted: make(chan struct{}, 1)}
reg.MustRegister(c)
handler.ServeHTTP(w, request)
if got, want := w.Code, http.StatusServiceUnavailable; got != want {
t.Errorf("got HTTP status code %d, want %d", got, want)
}
if got, want := w.Body.String(), "Exceeded configured timeout of 1ms.\n"; got != want {
t.Errorf("got body %q, want %q", got, want)
}
close(c.Block) // To not leak a goroutine.
}

@ -0,0 +1,97 @@
// Copyright 2017 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package promhttp
import (
"net/http"
"time"
"github.com/prometheus/client_golang/prometheus"
)
// The RoundTripperFunc type is an adapter to allow the use of ordinary
// functions as RoundTrippers. If f is a function with the appropriate
// signature, RoundTripperFunc(f) is a RoundTripper that calls f.
type RoundTripperFunc func(req *http.Request) (*http.Response, error)
// RoundTrip implements the RoundTripper interface.
func (rt RoundTripperFunc) RoundTrip(r *http.Request) (*http.Response, error) {
return rt(r)
}
// InstrumentRoundTripperInFlight is a middleware that wraps the provided
// http.RoundTripper. It sets the provided prometheus.Gauge to the number of
// requests currently handled by the wrapped http.RoundTripper.
//
// See ExampleInstrumentRoundTripperDuration for example usage.
func InstrumentRoundTripperInFlight(gauge prometheus.Gauge, next http.RoundTripper) RoundTripperFunc {
return RoundTripperFunc(func(r *http.Request) (*http.Response, error) {
gauge.Inc()
defer gauge.Dec()
return next.RoundTrip(r)
})
}
// InstrumentRoundTripperCounter is a middleware that wraps the provided
// http.RoundTripper to observe the request result with the provided CounterVec.
// The CounterVec must have zero, one, or two non-const non-curried labels. For
// those, the only allowed label names are "code" and "method". The function
// panics otherwise. Partitioning of the CounterVec happens by HTTP status code
// and/or HTTP method if the respective instance label names are present in the
// CounterVec. For unpartitioned counting, use a CounterVec with zero labels.
//
// If the wrapped RoundTripper panics or returns a non-nil error, the Counter
// is not incremented.
//
// See ExampleInstrumentRoundTripperDuration for example usage.
func InstrumentRoundTripperCounter(counter *prometheus.CounterVec, next http.RoundTripper) RoundTripperFunc {
code, method := checkLabels(counter)
return RoundTripperFunc(func(r *http.Request) (*http.Response, error) {
resp, err := next.RoundTrip(r)
if err == nil {
counter.With(labels(code, method, r.Method, resp.StatusCode)).Inc()
}
return resp, err
})
}
// InstrumentRoundTripperDuration is a middleware that wraps the provided
// http.RoundTripper to observe the request duration with the provided
// ObserverVec. The ObserverVec must have zero, one, or two non-const
// non-curried labels. For those, the only allowed label names are "code" and
// "method". The function panics otherwise. The Observe method of the Observer
// in the ObserverVec is called with the request duration in
// seconds. Partitioning happens by HTTP status code and/or HTTP method if the
// respective instance label names are present in the ObserverVec. For
// unpartitioned observations, use an ObserverVec with zero labels. Note that
// partitioning of Histograms is expensive and should be used judiciously.
//
// If the wrapped RoundTripper panics or returns a non-nil error, no values are
// reported.
//
// Note that this method is only guaranteed to never observe negative durations
// if used with Go1.9+.
func InstrumentRoundTripperDuration(obs prometheus.ObserverVec, next http.RoundTripper) RoundTripperFunc {
code, method := checkLabels(obs)
return RoundTripperFunc(func(r *http.Request) (*http.Response, error) {
start := time.Now()
resp, err := next.RoundTrip(r)
if err == nil {
obs.With(labels(code, method, r.Method, resp.StatusCode)).Observe(time.Since(start).Seconds())
}
return resp, err
})
}
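A minimal sketch of wrapping a client transport with just the counter middleware described above; the metric name and target URL are illustrative.

package main

import (
	"log"
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	// Counter partitioned by status code and method, matching the allowed
	// label names documented above.
	apiRequests := prometheus.NewCounterVec(
		prometheus.CounterOpts{
			Name: "example_client_requests_total",
			Help: "Requests made by the wrapped client.",
		},
		[]string{"code", "method"},
	)
	prometheus.MustRegister(apiRequests)

	client := &http.Client{
		Transport: promhttp.InstrumentRoundTripperCounter(apiRequests, http.DefaultTransport),
	}

	resp, err := client.Get("https://example.com")
	if err != nil {
		log.Fatal(err)
	}
	resp.Body.Close()
}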

@ -0,0 +1,144 @@
// Copyright 2017 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// +build go1.8
package promhttp
import (
"context"
"crypto/tls"
"net/http"
"net/http/httptrace"
"time"
)
// InstrumentTrace is used to offer flexibility in instrumenting the available
// httptrace.ClientTrace hook functions. Each function is passed a float64
// representing the time in seconds since the start of the http request. A user
// may choose to use separately bucketed Histograms, or implement custom
// instance labels on a per function basis.
type InstrumentTrace struct {
GotConn func(float64)
PutIdleConn func(float64)
GotFirstResponseByte func(float64)
Got100Continue func(float64)
DNSStart func(float64)
DNSDone func(float64)
ConnectStart func(float64)
ConnectDone func(float64)
TLSHandshakeStart func(float64)
TLSHandshakeDone func(float64)
WroteHeaders func(float64)
Wait100Continue func(float64)
WroteRequest func(float64)
}
// InstrumentRoundTripperTrace is a middleware that wraps the provided
// RoundTripper and reports times to hook functions provided in the
// InstrumentTrace struct. Hook functions that are not present in the provided
// InstrumentTrace struct are ignored. Times reported to the hook functions are
// time since the start of the request. Only with Go1.9+, those times are
// guaranteed to never be negative. (Earlier Go versions are not using a
// monotonic clock.) Note that partitioning of Histograms is expensive and
// should be used judiciously.
//
// For hook functions that receive an error as an argument, no observations are
// made in the event of a non-nil error value.
//
// See ExampleInstrumentRoundTripperDuration for example usage.
func InstrumentRoundTripperTrace(it *InstrumentTrace, next http.RoundTripper) RoundTripperFunc {
return RoundTripperFunc(func(r *http.Request) (*http.Response, error) {
start := time.Now()
trace := &httptrace.ClientTrace{
GotConn: func(_ httptrace.GotConnInfo) {
if it.GotConn != nil {
it.GotConn(time.Since(start).Seconds())
}
},
PutIdleConn: func(err error) {
if err != nil {
return
}
if it.PutIdleConn != nil {
it.PutIdleConn(time.Since(start).Seconds())
}
},
DNSStart: func(_ httptrace.DNSStartInfo) {
if it.DNSStart != nil {
it.DNSStart(time.Since(start).Seconds())
}
},
DNSDone: func(_ httptrace.DNSDoneInfo) {
if it.DNSDone != nil {
it.DNSDone(time.Since(start).Seconds())
}
},
ConnectStart: func(_, _ string) {
if it.ConnectStart != nil {
it.ConnectStart(time.Since(start).Seconds())
}
},
ConnectDone: func(_, _ string, err error) {
if err != nil {
return
}
if it.ConnectDone != nil {
it.ConnectDone(time.Since(start).Seconds())
}
},
GotFirstResponseByte: func() {
if it.GotFirstResponseByte != nil {
it.GotFirstResponseByte(time.Since(start).Seconds())
}
},
Got100Continue: func() {
if it.Got100Continue != nil {
it.Got100Continue(time.Since(start).Seconds())
}
},
TLSHandshakeStart: func() {
if it.TLSHandshakeStart != nil {
it.TLSHandshakeStart(time.Since(start).Seconds())
}
},
TLSHandshakeDone: func(_ tls.ConnectionState, err error) {
if err != nil {
return
}
if it.TLSHandshakeDone != nil {
it.TLSHandshakeDone(time.Since(start).Seconds())
}
},
WroteHeaders: func() {
if it.WroteHeaders != nil {
it.WroteHeaders(time.Since(start).Seconds())
}
},
Wait100Continue: func() {
if it.Wait100Continue != nil {
it.Wait100Continue(time.Since(start).Seconds())
}
},
WroteRequest: func(_ httptrace.WroteRequestInfo) {
if it.WroteRequest != nil {
it.WroteRequest(time.Since(start).Seconds())
}
},
}
r = r.WithContext(httptrace.WithClientTrace(context.Background(), trace))
return next.RoundTrip(r)
})
}
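A minimal sketch of the trace middleware, observing only two of the available hooks into an "event"-labeled histogram; the metric name, buckets, and URL are illustrative.

package main

import (
	"log"
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	// One histogram per trace event, distinguished by the "event" label.
	traceLatency := prometheus.NewHistogramVec(
		prometheus.HistogramOpts{
			Name:    "example_client_trace_duration_seconds",
			Help:    "Seconds from request start to selected httptrace events.",
			Buckets: []float64{.005, .01, .05, .1, .5, 1},
		},
		[]string{"event"},
	)
	prometheus.MustRegister(traceLatency)

	trace := &promhttp.InstrumentTrace{
		// Each hook receives the seconds elapsed since the request started.
		GotFirstResponseByte: func(t float64) {
			traceLatency.WithLabelValues("first_response_byte").Observe(t)
		},
		WroteRequest: func(t float64) {
			traceLatency.WithLabelValues("wrote_request").Observe(t)
		},
	}

	client := &http.Client{
		Transport: promhttp.InstrumentRoundTripperTrace(trace, http.DefaultTransport),
	}

	resp, err := client.Get("https://example.com")
	if err != nil {
		log.Fatal(err)
	}
	resp.Body.Close()
}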

@ -0,0 +1,195 @@
// Copyright 2017 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// +build go1.8
package promhttp
import (
"log"
"net/http"
"testing"
"time"
"github.com/prometheus/client_golang/prometheus"
)
func TestClientMiddlewareAPI(t *testing.T) {
client := http.DefaultClient
client.Timeout = 1 * time.Second
reg := prometheus.NewRegistry()
inFlightGauge := prometheus.NewGauge(prometheus.GaugeOpts{
Name: "client_in_flight_requests",
Help: "A gauge of in-flight requests for the wrapped client.",
})
counter := prometheus.NewCounterVec(
prometheus.CounterOpts{
Name: "client_api_requests_total",
Help: "A counter for requests from the wrapped client.",
},
[]string{"code", "method"},
)
dnsLatencyVec := prometheus.NewHistogramVec(
prometheus.HistogramOpts{
Name: "dns_duration_seconds",
Help: "Trace dns latency histogram.",
Buckets: []float64{.005, .01, .025, .05},
},
[]string{"event"},
)
tlsLatencyVec := prometheus.NewHistogramVec(
prometheus.HistogramOpts{
Name: "tls_duration_seconds",
Help: "Trace tls latency histogram.",
Buckets: []float64{.05, .1, .25, .5},
},
[]string{"event"},
)
histVec := prometheus.NewHistogramVec(
prometheus.HistogramOpts{
Name: "request_duration_seconds",
Help: "A histogram of request latencies.",
Buckets: prometheus.DefBuckets,
},
[]string{"method"},
)
reg.MustRegister(counter, tlsLatencyVec, dnsLatencyVec, histVec, inFlightGauge)
trace := &InstrumentTrace{
DNSStart: func(t float64) {
dnsLatencyVec.WithLabelValues("dns_start")
},
DNSDone: func(t float64) {
dnsLatencyVec.WithLabelValues("dns_done")
},
TLSHandshakeStart: func(t float64) {
tlsLatencyVec.WithLabelValues("tls_handshake_start")
},
TLSHandshakeDone: func(t float64) {
tlsLatencyVec.WithLabelValues("tls_handshake_done")
},
}
client.Transport = InstrumentRoundTripperInFlight(inFlightGauge,
InstrumentRoundTripperCounter(counter,
InstrumentRoundTripperTrace(trace,
InstrumentRoundTripperDuration(histVec, http.DefaultTransport),
),
),
)
resp, err := client.Get("http://google.com")
if err != nil {
t.Fatalf("%v", err)
}
defer resp.Body.Close()
}
func ExampleInstrumentRoundTripperDuration() {
client := http.DefaultClient
client.Timeout = 1 * time.Second
inFlightGauge := prometheus.NewGauge(prometheus.GaugeOpts{
Name: "client_in_flight_requests",
Help: "A gauge of in-flight requests for the wrapped client.",
})
counter := prometheus.NewCounterVec(
prometheus.CounterOpts{
Name: "client_api_requests_total",
Help: "A counter for requests from the wrapped client.",
},
[]string{"code", "method"},
)
// dnsLatencyVec uses custom buckets based on expected dns durations.
// It has an instance label "event", which is set in the
// DNSStart and DNSDone hook functions defined in the
// InstrumentTrace struct below.
dnsLatencyVec := prometheus.NewHistogramVec(
prometheus.HistogramOpts{
Name: "dns_duration_seconds",
Help: "Trace dns latency histogram.",
Buckets: []float64{.005, .01, .025, .05},
},
[]string{"event"},
)
// tlsLatencyVec uses custom buckets based on expected tls durations.
// It has an instance label "event", which is set in the
// TLSHandshakeStart and TLSHandshakeDone hook functions defined in the
// InstrumentTrace struct below.
tlsLatencyVec := prometheus.NewHistogramVec(
prometheus.HistogramOpts{
Name: "tls_duration_seconds",
Help: "Trace tls latency histogram.",
Buckets: []float64{.05, .1, .25, .5},
},
[]string{"event"},
)
// histVec has no labels, making it a zero-dimensional ObserverVec.
histVec := prometheus.NewHistogramVec(
prometheus.HistogramOpts{
Name: "request_duration_seconds",
Help: "A histogram of request latencies.",
Buckets: prometheus.DefBuckets,
},
[]string{},
)
// Register all of the metrics in the standard registry.
prometheus.MustRegister(counter, tlsLatencyVec, dnsLatencyVec, histVec, inFlightGauge)
// Define functions for the available httptrace.ClientTrace hook
// functions that we want to instrument.
trace := &InstrumentTrace{
DNSStart: func(t float64) {
dnsLatencyVec.WithLabelValues("dns_start")
},
DNSDone: func(t float64) {
dnsLatencyVec.WithLabelValues("dns_done")
},
TLSHandshakeStart: func(t float64) {
tlsLatencyVec.WithLabelValues("tls_handshake_start")
},
TLSHandshakeDone: func(t float64) {
tlsLatencyVec.WithLabelValues("tls_handshake_done")
},
}
// Wrap the default RoundTripper with middleware.
roundTripper := InstrumentRoundTripperInFlight(inFlightGauge,
InstrumentRoundTripperCounter(counter,
InstrumentRoundTripperTrace(trace,
InstrumentRoundTripperDuration(histVec, http.DefaultTransport),
),
),
)
// Set the RoundTripper on our client.
client.Transport = roundTripper
resp, err := client.Get("http://google.com")
if err != nil {
log.Printf("error: %v", err)
return
}
defer resp.Body.Close()
}

@ -0,0 +1,447 @@
// Copyright 2017 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package promhttp
import (
"errors"
"net/http"
"strconv"
"strings"
"time"
dto "github.com/prometheus/client_model/go"
"github.com/prometheus/client_golang/prometheus"
)
// magicString is used for the hacky label test in checkLabels. Remove once fixed.
const magicString = "zZgWfBxLqvG8kc8IMv3POi2Bb0tZI3vAnBx+gBaFi9FyPzB/CzKUer1yufDa"
// InstrumentHandlerInFlight is a middleware that wraps the provided
// http.Handler. It sets the provided prometheus.Gauge to the number of
// requests currently handled by the wrapped http.Handler.
//
// See the example for InstrumentHandlerDuration for example usage.
func InstrumentHandlerInFlight(g prometheus.Gauge, next http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
g.Inc()
defer g.Dec()
next.ServeHTTP(w, r)
})
}
// InstrumentHandlerDuration is a middleware that wraps the provided
// http.Handler to observe the request duration with the provided ObserverVec.
// The ObserverVec must have zero, one, or two non-const non-curried labels. For
// those, the only allowed label names are "code" and "method". The function
// panics otherwise. The Observe method of the Observer in the ObserverVec is
// called with the request duration in seconds. Partitioning happens by HTTP
// status code and/or HTTP method if the respective instance label names are
// present in the ObserverVec. For unpartitioned observations, use an
// ObserverVec with zero labels. Note that partitioning of Histograms is
// expensive and should be used judiciously.
//
// If the wrapped Handler does not set a status code, a status code of 200 is assumed.
//
// If the wrapped Handler panics, no values are reported.
//
// Note that this method is only guaranteed to never observe negative durations
// if used with Go1.9+.
func InstrumentHandlerDuration(obs prometheus.ObserverVec, next http.Handler) http.HandlerFunc {
code, method := checkLabels(obs)
if code {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
now := time.Now()
d := newDelegator(w, nil)
next.ServeHTTP(d, r)
obs.With(labels(code, method, r.Method, d.Status())).Observe(time.Since(now).Seconds())
})
}
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
now := time.Now()
next.ServeHTTP(w, r)
obs.With(labels(code, method, r.Method, 0)).Observe(time.Since(now).Seconds())
})
}
// InstrumentHandlerCounter is a middleware that wraps the provided http.Handler
// to observe the request result with the provided CounterVec. The CounterVec
// must have zero, one, or two non-const non-curried labels. For those, the only
// allowed label names are "code" and "method". The function panics
// otherwise. Partitioning of the CounterVec happens by HTTP status code and/or
// HTTP method if the respective instance label names are present in the
// CounterVec. For unpartitioned counting, use a CounterVec with zero labels.
//
// If the wrapped Handler does not set a status code, a status code of 200 is assumed.
//
// If the wrapped Handler panics, the Counter is not incremented.
//
// See the example for InstrumentHandlerDuration for example usage.
func InstrumentHandlerCounter(counter *prometheus.CounterVec, next http.Handler) http.HandlerFunc {
code, method := checkLabels(counter)
if code {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
d := newDelegator(w, nil)
next.ServeHTTP(d, r)
counter.With(labels(code, method, r.Method, d.Status())).Inc()
})
}
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
next.ServeHTTP(w, r)
counter.With(labels(code, method, r.Method, 0)).Inc()
})
}
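A minimal sketch of the server-side counter middleware on a single handler, partitioned by status code only; the metric name, path, and port are illustrative.

package main

import (
	"log"
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	// Partition by status code only; a CounterVec with zero labels would
	// count requests without any partitioning.
	hits := prometheus.NewCounterVec(
		prometheus.CounterOpts{
			Name: "example_hello_requests_total",
			Help: "Requests served by the hello handler.",
		},
		[]string{"code"},
	)
	prometheus.MustRegister(hits)

	hello := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte("hello"))
	})

	http.Handle("/hello", promhttp.InstrumentHandlerCounter(hits, hello))
	http.Handle("/metrics", promhttp.Handler())
	log.Fatal(http.ListenAndServe(":8080", nil))
}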
// InstrumentHandlerTimeToWriteHeader is a middleware that wraps the provided
// http.Handler to observe with the provided ObserverVec the request duration
// until the response headers are written. The ObserverVec must have zero, one,
// or two non-const non-curried labels. For those, the only allowed label names
// are "code" and "method". The function panics otherwise. The Observe method of
// the Observer in the ObserverVec is called with the request duration in
// seconds. Partitioning happens by HTTP status code and/or HTTP method if the
// respective instance label names are present in the ObserverVec. For
// unpartitioned observations, use an ObserverVec with zero labels. Note that
// partitioning of Histograms is expensive and should be used judiciously.
//
// If the wrapped Handler panics before calling WriteHeader, no value is
// reported.
//
// Note that this method is only guaranteed to never observe negative durations
// if used with Go1.9+.
//
// See the example for InstrumentHandlerDuration for example usage.
func InstrumentHandlerTimeToWriteHeader(obs prometheus.ObserverVec, next http.Handler) http.HandlerFunc {
code, method := checkLabels(obs)
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
now := time.Now()
d := newDelegator(w, func(status int) {
obs.With(labels(code, method, r.Method, status)).Observe(time.Since(now).Seconds())
})
next.ServeHTTP(d, r)
})
}
// InstrumentHandlerRequestSize is a middleware that wraps the provided
// http.Handler to observe the request size with the provided ObserverVec. The
// ObserverVec must have zero, one, or two non-const non-curried labels. For
// those, the only allowed label names are "code" and "method". The function
// panics otherwise. The Observe method of the Observer in the ObserverVec is
// called with the request size in bytes. Partitioning happens by HTTP status
// code and/or HTTP method if the respective instance label names are present in
// the ObserverVec. For unpartitioned observations, use an ObserverVec with zero
// labels. Note that partitioning of Histograms is expensive and should be used
// judiciously.
//
// If the wrapped Handler does not set a status code, a status code of 200 is assumed.
//
// If the wrapped Handler panics, no values are reported.
//
// See the example for InstrumentHandlerDuration for example usage.
func InstrumentHandlerRequestSize(obs prometheus.ObserverVec, next http.Handler) http.HandlerFunc {
code, method := checkLabels(obs)
if code {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
d := newDelegator(w, nil)
next.ServeHTTP(d, r)
size := computeApproximateRequestSize(r)
obs.With(labels(code, method, r.Method, d.Status())).Observe(float64(size))
})
}
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
next.ServeHTTP(w, r)
size := computeApproximateRequestSize(r)
obs.With(labels(code, method, r.Method, 0)).Observe(float64(size))
})
}
// InstrumentHandlerResponseSize is a middleware that wraps the provided
// http.Handler to observe the response size with the provided ObserverVec. The
// ObserverVec must have zero, one, or two non-const non-curried labels. For
// those, the only allowed label names are "code" and "method". The function
// panics otherwise. The Observe method of the Observer in the ObserverVec is
// called with the response size in bytes. Partitioning happens by HTTP status
// code and/or HTTP method if the respective instance label names are present in
// the ObserverVec. For unpartitioned observations, use an ObserverVec with zero
// labels. Note that partitioning of Histograms is expensive and should be used
// judiciously.
//
// If the wrapped Handler does not set a status code, a status code of 200 is assumed.
//
// If the wrapped Handler panics, no values are reported.
//
// See the example for InstrumentHandlerDuration for example usage.
func InstrumentHandlerResponseSize(obs prometheus.ObserverVec, next http.Handler) http.Handler {
code, method := checkLabels(obs)
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
d := newDelegator(w, nil)
next.ServeHTTP(d, r)
obs.With(labels(code, method, r.Method, d.Status())).Observe(float64(d.Written()))
})
}
func checkLabels(c prometheus.Collector) (code bool, method bool) {
// TODO(beorn7): Remove this hacky way to check for instance labels
// once Descriptors can have their dimensionality queried.
var (
desc *prometheus.Desc
m prometheus.Metric
pm dto.Metric
lvs []string
)
// Get the Desc from the Collector.
descc := make(chan *prometheus.Desc, 1)
c.Describe(descc)
select {
case desc = <-descc:
default:
panic("no description provided by collector")
}
select {
case <-descc:
panic("more than one description provided by collector")
default:
}
close(descc)
// Create a ConstMetric with the Desc. Since we don't know how many
// variable labels there are, try for as long as it needs.
for err := errors.New("dummy"); err != nil; lvs = append(lvs, magicString) {
m, err = prometheus.NewConstMetric(desc, prometheus.UntypedValue, 0, lvs...)
}
// Write out the metric into a proto message and look at the labels.
// If the value is not the magicString, it is a constLabel, which doesn't interest us.
// If the label is curried, it doesn't interest us.
// In all other cases, only "code" or "method" is allowed.
if err := m.Write(&pm); err != nil {
panic("error checking metric for labels")
}
for _, label := range pm.Label {
name, value := label.GetName(), label.GetValue()
if value != magicString || isLabelCurried(c, name) {
continue
}
switch name {
case "code":
code = true
case "method":
method = true
default:
panic("metric partitioned with non-supported labels")
}
}
return
}
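The label check above is what makes the Instrument* constructors panic on unsupported variable labels. A small sketch of that contract from the caller's side; the metric names and the "tenant" label are illustrative.

package main

import (
	"fmt"
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	next := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {})

	// "code" and "method" are the only accepted variable label names.
	ok := prometheus.NewCounterVec(
		prometheus.CounterOpts{Name: "ok_total", Help: "ok"},
		[]string{"code", "method"},
	)
	promhttp.InstrumentHandlerCounter(ok, next) // accepted

	// Any other variable label makes the middleware constructor panic.
	bad := prometheus.NewCounterVec(
		prometheus.CounterOpts{Name: "bad_total", Help: "bad"},
		[]string{"tenant"},
	)
	func() {
		defer func() { fmt.Println("panicked:", recover() != nil) }()
		promhttp.InstrumentHandlerCounter(bad, next)
	}()
}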
func isLabelCurried(c prometheus.Collector, label string) bool {
// This is even hackier than the label test above.
// We essentially try to curry again and see if it works.
// But for that, we need to type-convert to the two
// types we use here, ObserverVec or *CounterVec.
switch v := c.(type) {
case *prometheus.CounterVec:
if _, err := v.CurryWith(prometheus.Labels{label: "dummy"}); err == nil {
return false
}
case prometheus.ObserverVec:
if _, err := v.CurryWith(prometheus.Labels{label: "dummy"}); err == nil {
return false
}
default:
panic("unsupported metric vec type")
}
return true
}
// emptyLabels is a one-time allocation for non-partitioned metrics to avoid
// unnecessary allocations on each request.
var emptyLabels = prometheus.Labels{}
func labels(code, method bool, reqMethod string, status int) prometheus.Labels {
if !(code || method) {
return emptyLabels
}
labels := prometheus.Labels{}
if code {
labels["code"] = sanitizeCode(status)
}
if method {
labels["method"] = sanitizeMethod(reqMethod)
}
return labels
}
func computeApproximateRequestSize(r *http.Request) int {
s := 0
if r.URL != nil {
s += len(r.URL.String())
}
s += len(r.Method)
s += len(r.Proto)
for name, values := range r.Header {
s += len(name)
for _, value := range values {
s += len(value)
}
}
s += len(r.Host)
// N.B. r.Form and r.MultipartForm are assumed to be included in r.URL.
if r.ContentLength != -1 {
s += int(r.ContentLength)
}
return s
}
func sanitizeMethod(m string) string {
switch m {
case "GET", "get":
return "get"
case "PUT", "put":
return "put"
case "HEAD", "head":
return "head"
case "POST", "post":
return "post"
case "DELETE", "delete":
return "delete"
case "CONNECT", "connect":
return "connect"
case "OPTIONS", "options":
return "options"
case "NOTIFY", "notify":
return "notify"
default:
return strings.ToLower(m)
}
}
// If the wrapped http.Handler has not set a status code, i.e. the value is
// currently 0, sanitizeCode will return 200, for consistency with behavior in
// the stdlib.
func sanitizeCode(s int) string {
switch s {
case 100:
return "100"
case 101:
return "101"
case 200, 0:
return "200"
case 201:
return "201"
case 202:
return "202"
case 203:
return "203"
case 204:
return "204"
case 205:
return "205"
case 206:
return "206"
case 300:
return "300"
case 301:
return "301"
case 302:
return "302"
case 304:
return "304"
case 305:
return "305"
case 307:
return "307"
case 400:
return "400"
case 401:
return "401"
case 402:
return "402"
case 403:
return "403"
case 404:
return "404"
case 405:
return "405"
case 406:
return "406"
case 407:
return "407"
case 408:
return "408"
case 409:
return "409"
case 410:
return "410"
case 411:
return "411"
case 412:
return "412"
case 413:
return "413"
case 414:
return "414"
case 415:
return "415"
case 416:
return "416"
case 417:
return "417"
case 418:
return "418"
case 500:
return "500"
case 501:
return "501"
case 502:
return "502"
case 503:
return "503"
case 504:
return "504"
case 505:
return "505"
case 428:
return "428"
case 429:
return "429"
case 431:
return "431"
case 511:
return "511"
default:
return strconv.Itoa(s)
}
}

@ -0,0 +1,401 @@
// Copyright 2017 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package promhttp
import (
"io"
"log"
"net/http"
"net/http/httptest"
"testing"
"github.com/prometheus/client_golang/prometheus"
)
func TestLabelCheck(t *testing.T) {
scenarios := map[string]struct {
varLabels []string
constLabels []string
curriedLabels []string
ok bool
}{
"empty": {
varLabels: []string{},
constLabels: []string{},
curriedLabels: []string{},
ok: true,
},
"code as single var label": {
varLabels: []string{"code"},
constLabels: []string{},
curriedLabels: []string{},
ok: true,
},
"method as single var label": {
varLabels: []string{"method"},
constLabels: []string{},
curriedLabels: []string{},
ok: true,
},
"cade and method as var labels": {
varLabels: []string{"method", "code"},
constLabels: []string{},
curriedLabels: []string{},
ok: true,
},
"valid case with all labels used": {
varLabels: []string{"code", "method"},
constLabels: []string{"foo", "bar"},
curriedLabels: []string{"dings", "bums"},
ok: true,
},
"unsupported var label": {
varLabels: []string{"foo"},
constLabels: []string{},
curriedLabels: []string{},
ok: false,
},
"mixed var labels": {
varLabels: []string{"method", "foo", "code"},
constLabels: []string{},
curriedLabels: []string{},
ok: false,
},
"unsupported var label but curried": {
varLabels: []string{},
constLabels: []string{},
curriedLabels: []string{"foo"},
ok: true,
},
"mixed var labels but unsupported curried": {
varLabels: []string{"code", "method"},
constLabels: []string{},
curriedLabels: []string{"foo"},
ok: true,
},
"supported label as const and curry": {
varLabels: []string{},
constLabels: []string{"code"},
curriedLabels: []string{"method"},
ok: true,
},
"supported label as const and curry with unsupported as var": {
varLabels: []string{"foo"},
constLabels: []string{"code"},
curriedLabels: []string{"method"},
ok: false,
},
}
for name, sc := range scenarios {
t.Run(name, func(t *testing.T) {
constLabels := prometheus.Labels{}
for _, l := range sc.constLabels {
constLabels[l] = "dummy"
}
c := prometheus.NewCounterVec(
prometheus.CounterOpts{
Name: "c",
Help: "c help",
ConstLabels: constLabels,
},
append(sc.varLabels, sc.curriedLabels...),
)
o := prometheus.ObserverVec(prometheus.NewHistogramVec(
prometheus.HistogramOpts{
Name: "c",
Help: "c help",
ConstLabels: constLabels,
},
append(sc.varLabels, sc.curriedLabels...),
))
for _, l := range sc.curriedLabels {
c = c.MustCurryWith(prometheus.Labels{l: "dummy"})
o = o.MustCurryWith(prometheus.Labels{l: "dummy"})
}
func() {
defer func() {
if err := recover(); err != nil {
if sc.ok {
t.Error("unexpected panic:", err)
}
} else if !sc.ok {
t.Error("expected panic")
}
}()
InstrumentHandlerCounter(c, nil)
}()
func() {
defer func() {
if err := recover(); err != nil {
if sc.ok {
t.Error("unexpected panic:", err)
}
} else if !sc.ok {
t.Error("expected panic")
}
}()
InstrumentHandlerDuration(o, nil)
}()
if sc.ok {
// Test if wantCode and wantMethod were detected correctly.
var wantCode, wantMethod bool
for _, l := range sc.varLabels {
if l == "code" {
wantCode = true
}
if l == "method" {
wantMethod = true
}
}
gotCode, gotMethod := checkLabels(c)
if gotCode != wantCode {
t.Errorf("wanted code=%t for counter, got code=%t", wantCode, gotCode)
}
if gotMethod != wantMethod {
t.Errorf("wanted method=%t for counter, got method=%t", wantMethod, gotMethod)
}
gotCode, gotMethod = checkLabels(o)
if gotCode != wantCode {
t.Errorf("wanted code=%t for observer, got code=%t", wantCode, gotCode)
}
if gotMethod != wantMethod {
t.Errorf("wanted method=%t for observer, got method=%t", wantMethod, gotMethod)
}
}
})
}
}
func TestMiddlewareAPI(t *testing.T) {
reg := prometheus.NewRegistry()
inFlightGauge := prometheus.NewGauge(prometheus.GaugeOpts{
Name: "in_flight_requests",
Help: "A gauge of requests currently being served by the wrapped handler.",
})
counter := prometheus.NewCounterVec(
prometheus.CounterOpts{
Name: "api_requests_total",
Help: "A counter for requests to the wrapped handler.",
},
[]string{"code", "method"},
)
histVec := prometheus.NewHistogramVec(
prometheus.HistogramOpts{
Name: "response_duration_seconds",
Help: "A histogram of request latencies.",
Buckets: prometheus.DefBuckets,
ConstLabels: prometheus.Labels{"handler": "api"},
},
[]string{"method"},
)
writeHeaderVec := prometheus.NewHistogramVec(
prometheus.HistogramOpts{
Name: "write_header_duration_seconds",
Help: "A histogram of time to first write latencies.",
Buckets: prometheus.DefBuckets,
ConstLabels: prometheus.Labels{"handler": "api"},
},
[]string{},
)
responseSize := prometheus.NewHistogramVec(
prometheus.HistogramOpts{
Name: "push_request_size_bytes",
Help: "A histogram of request sizes for requests.",
Buckets: []float64{200, 500, 900, 1500},
},
[]string{},
)
handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.Write([]byte("OK"))
})
reg.MustRegister(inFlightGauge, counter, histVec, responseSize, writeHeaderVec)
chain := InstrumentHandlerInFlight(inFlightGauge,
InstrumentHandlerCounter(counter,
InstrumentHandlerDuration(histVec,
InstrumentHandlerTimeToWriteHeader(writeHeaderVec,
InstrumentHandlerResponseSize(responseSize, handler),
),
),
),
)
r, _ := http.NewRequest("GET", "www.example.com", nil)
w := httptest.NewRecorder()
chain.ServeHTTP(w, r)
}
func TestInstrumentTimeToFirstWrite(t *testing.T) {
var i int
dobs := &responseWriterDelegator{
ResponseWriter: httptest.NewRecorder(),
observeWriteHeader: func(status int) {
i = status
},
}
d := newDelegator(dobs, nil)
d.WriteHeader(http.StatusOK)
if i != http.StatusOK {
t.Fatalf("failed to execute observeWriteHeader")
}
}
// testResponseWriter is an http.ResponseWriter that also implements
// http.CloseNotifier, http.Flusher, and io.ReaderFrom.
type testResponseWriter struct {
closeNotifyCalled, flushCalled, readFromCalled bool
}
func (t *testResponseWriter) Header() http.Header { return nil }
func (t *testResponseWriter) Write([]byte) (int, error) { return 0, nil }
func (t *testResponseWriter) WriteHeader(int) {}
func (t *testResponseWriter) CloseNotify() <-chan bool {
t.closeNotifyCalled = true
return nil
}
func (t *testResponseWriter) Flush() { t.flushCalled = true }
func (t *testResponseWriter) ReadFrom(io.Reader) (int64, error) {
t.readFromCalled = true
return 0, nil
}
// testFlusher is an http.ResponseWriter that also implements http.Flusher.
type testFlusher struct {
flushCalled bool
}
func (t *testFlusher) Header() http.Header { return nil }
func (t *testFlusher) Write([]byte) (int, error) { return 0, nil }
func (t *testFlusher) WriteHeader(int) {}
func (t *testFlusher) Flush() { t.flushCalled = true }
func TestInterfaceUpgrade(t *testing.T) {
w := &testResponseWriter{}
d := newDelegator(w, nil)
d.(http.CloseNotifier).CloseNotify()
if !w.closeNotifyCalled {
t.Error("CloseNotify not called")
}
d.(http.Flusher).Flush()
if !w.flushCalled {
t.Error("Flush not called")
}
d.(io.ReaderFrom).ReadFrom(nil)
if !w.readFromCalled {
t.Error("ReadFrom not called")
}
if _, ok := d.(http.Hijacker); ok {
t.Error("delegator unexpectedly implements http.Hijacker")
}
f := &testFlusher{}
d = newDelegator(f, nil)
if _, ok := d.(http.CloseNotifier); ok {
t.Error("delegator unexpectedly implements http.CloseNotifier")
}
d.(http.Flusher).Flush()
if !f.flushCalled {
t.Error("Flush not called")
}
if _, ok := d.(io.ReaderFrom); ok {
t.Error("delegator unexpectedly implements io.ReaderFrom")
}
if _, ok := d.(http.Hijacker); ok {
t.Error("delegator unexpectedly implements http.Hijacker")
}
}
func ExampleInstrumentHandlerDuration() {
inFlightGauge := prometheus.NewGauge(prometheus.GaugeOpts{
Name: "in_flight_requests",
Help: "A gauge of requests currently being served by the wrapped handler.",
})
counter := prometheus.NewCounterVec(
prometheus.CounterOpts{
Name: "api_requests_total",
Help: "A counter for requests to the wrapped handler.",
},
[]string{"code", "method"},
)
// duration is partitioned by the HTTP method and handler. It uses custom
// buckets based on the expected request duration.
duration := prometheus.NewHistogramVec(
prometheus.HistogramOpts{
Name: "request_duration_seconds",
Help: "A histogram of latencies for requests.",
Buckets: []float64{.25, .5, 1, 2.5, 5, 10},
},
[]string{"handler", "method"},
)
// responseSize has no labels, making it a zero-dimensional
// ObserverVec.
responseSize := prometheus.NewHistogramVec(
prometheus.HistogramOpts{
Name: "response_size_bytes",
Help: "A histogram of response sizes for requests.",
Buckets: []float64{200, 500, 900, 1500},
},
[]string{},
)
// Create the handlers that will be wrapped by the middleware.
pushHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.Write([]byte("Push"))
})
pullHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.Write([]byte("Pull"))
})
// Register all of the metrics in the standard registry.
prometheus.MustRegister(inFlightGauge, counter, duration, responseSize)
// Instrument the handlers with all the metrics, injecting the "handler"
// label by currying.
pushChain := InstrumentHandlerInFlight(inFlightGauge,
InstrumentHandlerDuration(duration.MustCurryWith(prometheus.Labels{"handler": "push"}),
InstrumentHandlerCounter(counter,
InstrumentHandlerResponseSize(responseSize, pushHandler),
),
),
)
pullChain := InstrumentHandlerInFlight(inFlightGauge,
InstrumentHandlerDuration(duration.MustCurryWith(prometheus.Labels{"handler": "pull"}),
InstrumentHandlerCounter(counter,
InstrumentHandlerResponseSize(responseSize, pullHandler),
),
),
)
http.Handle("/metrics", Handler())
http.Handle("/push", pushChain)
http.Handle("/pull", pullChain)
if err := http.ListenAndServe(":3000", nil); err != nil {
log.Fatal(err)
}
}

@ -0,0 +1,172 @@
// Copyright 2018 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package push
// This file contains only deprecated code. Remove after v0.9 is released.
import (
"bytes"
"fmt"
"io/ioutil"
"net/http"
"net/url"
"os"
"strings"
"github.com/prometheus/common/expfmt"
"github.com/prometheus/common/model"
"github.com/prometheus/client_golang/prometheus"
)
// FromGatherer triggers a metric collection by the provided Gatherer (which is
// usually implemented by a prometheus.Registry) and pushes all gathered metrics
// to the Pushgateway specified by url, using the provided job name and the
// (optional) further grouping labels (the grouping map may be nil). See the
// Pushgateway documentation for detailed implications of the job and other
// grouping labels. Neither the job name nor any grouping label value may
// contain a "/". The metrics pushed must not contain a job label of their own
// nor any of the grouping labels.
//
// You can use just host:port or ip:port as url, in which case 'http://' is
// added automatically. You can also include the schema in the URL. However, do
// not include the '/metrics/jobs/...' part.
//
// Note that all previously pushed metrics with the same job and other grouping
// labels will be replaced with the metrics pushed by this call. (It uses HTTP
// method 'PUT' to push to the Pushgateway.)
//
// Deprecated: Please use a Pusher created with New instead.
func FromGatherer(job string, grouping map[string]string, url string, g prometheus.Gatherer) error {
return push(job, grouping, url, g, "PUT")
}
// AddFromGatherer works like FromGatherer, but only previously pushed metrics
// with the same name (and the same job and other grouping labels) will be
// replaced. (It uses HTTP method 'POST' to push to the Pushgateway.)
//
// Deprecated: Please use a Pusher created with New instead.
func AddFromGatherer(job string, grouping map[string]string, url string, g prometheus.Gatherer) error {
return push(job, grouping, url, g, "POST")
}
func push(job string, grouping map[string]string, pushURL string, g prometheus.Gatherer, method string) error {
if !strings.Contains(pushURL, "://") {
pushURL = "http://" + pushURL
}
if strings.HasSuffix(pushURL, "/") {
pushURL = pushURL[:len(pushURL)-1]
}
if strings.Contains(job, "/") {
return fmt.Errorf("job contains '/': %s", job)
}
urlComponents := []string{url.QueryEscape(job)}
for ln, lv := range grouping {
if !model.LabelName(ln).IsValid() {
return fmt.Errorf("grouping label has invalid name: %s", ln)
}
if strings.Contains(lv, "/") {
return fmt.Errorf("value of grouping label %s contains '/': %s", ln, lv)
}
urlComponents = append(urlComponents, ln, lv)
}
pushURL = fmt.Sprintf("%s/metrics/job/%s", pushURL, strings.Join(urlComponents, "/"))
mfs, err := g.Gather()
if err != nil {
return err
}
buf := &bytes.Buffer{}
enc := expfmt.NewEncoder(buf, expfmt.FmtProtoDelim)
// Check for pre-existing grouping labels:
for _, mf := range mfs {
for _, m := range mf.GetMetric() {
for _, l := range m.GetLabel() {
if l.GetName() == "job" {
return fmt.Errorf("pushed metric %s (%s) already contains a job label", mf.GetName(), m)
}
if _, ok := grouping[l.GetName()]; ok {
return fmt.Errorf(
"pushed metric %s (%s) already contains grouping label %s",
mf.GetName(), m, l.GetName(),
)
}
}
}
enc.Encode(mf)
}
req, err := http.NewRequest(method, pushURL, buf)
if err != nil {
return err
}
req.Header.Set(contentTypeHeader, string(expfmt.FmtProtoDelim))
resp, err := http.DefaultClient.Do(req)
if err != nil {
return err
}
defer resp.Body.Close()
if resp.StatusCode != 202 {
body, _ := ioutil.ReadAll(resp.Body) // Ignore any further error as this is for an error message only.
return fmt.Errorf("unexpected status code %d while pushing to %s: %s", resp.StatusCode, pushURL, body)
}
return nil
}
// Collectors works like FromGatherer, but it does not use a Gatherer. Instead,
// it collects from the provided collectors directly. It is a convenient way to
// push only a few metrics.
//
// Deprecated: Please use a Pusher created with New instead.
func Collectors(job string, grouping map[string]string, url string, collectors ...prometheus.Collector) error {
return pushCollectors(job, grouping, url, "PUT", collectors...)
}
// AddCollectors works like AddFromGatherer, but it does not use a Gatherer.
// Instead, it collects from the provided collectors directly. It is a
// convenient way to push only a few metrics.
//
// Deprecated: Please use a Pusher created with New instead.
func AddCollectors(job string, grouping map[string]string, url string, collectors ...prometheus.Collector) error {
return pushCollectors(job, grouping, url, "POST", collectors...)
}
func pushCollectors(job string, grouping map[string]string, url, method string, collectors ...prometheus.Collector) error {
r := prometheus.NewRegistry()
for _, collector := range collectors {
if err := r.Register(collector); err != nil {
return err
}
}
return push(job, grouping, url, r, method)
}
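// A short sketch of the collector-based convenience wrappers above, as seen
// from calling code; the gauge and the Pushgateway address are illustrative:
//
//	completionTime := prometheus.NewGauge(prometheus.GaugeOpts{
//		Name: "db_backup_last_completion_timestamp_seconds",
//		Help: "The timestamp of the last completion of a DB backup.",
//	})
//	completionTime.SetToCurrentTime()
//	if err := push.Collectors(
//		"db_backup", nil, "pushgateway:9091", completionTime,
//	); err != nil {
//		fmt.Println("Could not push completion time to Pushgateway:", err)
//	}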
// HostnameGroupingKey returns a label map with the only entry
// {instance="<hostname>"}. This can be conveniently used as the grouping
// parameter if metrics should be pushed with the hostname as label. The
// returned map is created upon each call so that the caller is free to add more
// labels to the map.
//
// Deprecated: Usually, metrics pushed to the Pushgateway should not be
// host-centric. (You would use https://github.com/prometheus/node_exporter in
// that case.) If you have the need to add the hostname to the grouping key, you
// are probably doing something wrong. See
// https://prometheus.io/docs/practices/pushing/ for details.
func HostnameGroupingKey() map[string]string {
hostname, err := os.Hostname()
if err != nil {
return map[string]string{"instance": "unknown"}
}
return map[string]string{"instance": hostname}
}
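// Migration sketch, not part of this file: the builder-style Pusher created
// with New (see push.go in this package) covers the same use cases as the
// deprecated helpers above. A rough equivalent of Collectors combined with a
// hostname grouping key, with completionTime as an illustrative collector:
//
//	hostname, _ := os.Hostname()
//	err := push.New("pushgateway:9091", "db_backup").
//		Collector(completionTime).
//		Grouping("instance", hostname).
//		Push()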

@ -0,0 +1,80 @@
// Copyright 2016 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package push_test
import (
"fmt"
"time"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/push"
)
var (
completionTime = prometheus.NewGauge(prometheus.GaugeOpts{
Name: "db_backup_last_completion_timestamp_seconds",
Help: "The timestamp of the last completion of a DB backup, successful or not.",
})
successTime = prometheus.NewGauge(prometheus.GaugeOpts{
Name: "db_backup_last_success_timestamp_seconds",
Help: "The timestamp of the last successful completion of a DB backup.",
})
duration = prometheus.NewGauge(prometheus.GaugeOpts{
Name: "db_backup_duration_seconds",
Help: "The duration of the last DB backup in seconds.",
})
records = prometheus.NewGauge(prometheus.GaugeOpts{
Name: "db_backup_records_processed",
Help: "The number of records processed in the last DB backup.",
})
)
func performBackup() (int, error) {
// Perform the backup and return the number of backed up records and any
// applicable error.
// ...
return 42, nil
}
func ExamplePusher_Add() {
// We use a registry here to benefit from the consistency checks that
// happen during registration.
registry := prometheus.NewRegistry()
registry.MustRegister(completionTime, duration, records)
// Note that successTime is not registered.
pusher := push.New("http://pushgateway:9091", "db_backup").Gatherer(registry)
start := time.Now()
n, err := performBackup()
records.Set(float64(n))
// Note that time.Since only uses a monotonic clock in Go1.9+.
duration.Set(time.Since(start).Seconds())
completionTime.SetToCurrentTime()
if err != nil {
fmt.Println("DB backup failed:", err)
} else {
// Add successTime to pusher only in case of success.
// We could as well register it with the registry.
// This example, however, demonstrates that you can
// mix Gatherers and Collectors when handling a Pusher.
pusher.Collector(successTime)
successTime.SetToCurrentTime()
}
// Add is used here rather than Push to not delete a previously pushed
// success timestamp in case of a failure of this backup.
if err := pusher.Add(); err != nil {
fmt.Println("Could not push to Pushgateway:", err)
}
}
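// A further configuration sketch, beyond the example above: the Pusher can
// also be given a custom HTTP client and HTTP Basic Authentication before
// pushing. The timeout and credentials are placeholders, and this assumes an
// additional net/http import:
//
//	pusher := push.New("http://pushgateway:9091", "db_backup").
//		Gatherer(registry).
//		Client(&http.Client{Timeout: 10 * time.Second}).
//		BasicAuth("user", "secret")
//	if err := pusher.Push(); err != nil {
//		fmt.Println("Could not push to Pushgateway:", err)
//	}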

@ -15,42 +15,21 @@ package push_test
import ( import (
"fmt" "fmt"
"time"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/push" "github.com/prometheus/client_golang/prometheus/push"
) )
func ExampleCollectors() { func ExamplePusher_Push() {
completionTime := prometheus.NewGauge(prometheus.GaugeOpts{ completionTime := prometheus.NewGauge(prometheus.GaugeOpts{
Name: "db_backup_last_completion_timestamp_seconds", Name: "db_backup_last_completion_timestamp_seconds",
Help: "The timestamp of the last succesful completion of a DB backup.", Help: "The timestamp of the last successful completion of a DB backup.",
}) })
completionTime.Set(float64(time.Now().Unix())) completionTime.SetToCurrentTime()
if err := push.Collectors( if err := push.New("http://pushgateway:9091", "db_backup").
"db_backup", push.HostnameGroupingKey(), Collector(completionTime).
"http://pushgateway:9091", Grouping("db", "customers").
completionTime, Push(); err != nil {
); err != nil {
fmt.Println("Could not push completion time to Pushgateway:", err)
}
}
func ExampleRegistry() {
registry := prometheus.NewRegistry()
completionTime := prometheus.NewGauge(prometheus.GaugeOpts{
Name: "db_backup_last_completion_timestamp_seconds",
Help: "The timestamp of the last succesful completion of a DB backup.",
})
registry.MustRegister(completionTime)
completionTime.Set(float64(time.Now().Unix()))
if err := push.FromGatherer(
"db_backup", push.HostnameGroupingKey(),
"http://pushgateway:9091",
registry,
); err != nil {
fmt.Println("Could not push completion time to Pushgateway:", err) fmt.Println("Could not push completion time to Pushgateway:", err)
} }
} }

@ -11,20 +11,27 @@
// See the License for the specific language governing permissions and // See the License for the specific language governing permissions and
// limitations under the License. // limitations under the License.
// Copyright (c) 2013, The Prometheus Authors // Package push provides functions to push metrics to a Pushgateway. It uses a
// All rights reserved. // builder approach. Create a Pusher with New and then add the various options
// by using its methods, finally calling Add or Push, like this:
// //
// Use of this source code is governed by a BSD-style license that can be found // // Easy case:
// in the LICENSE file. // push.New("http://example.org/metrics", "my_job").Gatherer(myRegistry).Push()
//
// Package push provides functions to push metrics to a Pushgateway. The metrics // // Complex case:
// to push are either collected from a provided registry, or from explicitly // push.New("http://example.org/metrics", "my_job").
// listed collectors. // Collector(myCollector1).
// Collector(myCollector2).
// Grouping("zone", "xy").
// Client(&myHTTPClient).
// BasicAuth("top", "secret").
// Add()
//
// See the examples section for more detailed examples.
// //
// See the documentation of the Pushgateway to understand the meaning of the // See the documentation of the Pushgateway to understand the meaning of
// grouping parameters and the differences between push.Registry and // the grouping key and the differences between Push and Add:
// push.Collectors on the one hand and push.AddRegistry and push.AddCollectors // https://github.com/prometheus/pushgateway
// on the other hand: https://github.com/prometheus/pushgateway
package push package push
import ( import (
@ -33,7 +40,6 @@ import (
"io/ioutil" "io/ioutil"
"net/http" "net/http"
"net/url" "net/url"
"os"
"strings" "strings"
"github.com/prometheus/common/expfmt" "github.com/prometheus/common/expfmt"
@ -44,57 +50,149 @@ import (
const contentTypeHeader = "Content-Type" const contentTypeHeader = "Content-Type"
// FromGatherer triggers a metric collection by the provided Gatherer (which is // Pusher manages a push to the Pushgateway. Use New to create one, configure it
// usually implemented by a prometheus.Registry) and pushes all gathered metrics // with its methods, and finally use the Add or Push method to push.
// to the Pushgateway specified by url, using the provided job name and the type Pusher struct {
// (optional) further grouping labels (the grouping map may be nil). See the error error
// Pushgateway documentation for detailed implications of the job and other
// grouping labels. Neither the job name nor any grouping label value may url, job string
// contain a "/". The metrics pushed must not contain a job label of their own grouping map[string]string
// nor any of the grouping labels.
gatherers prometheus.Gatherers
registerer prometheus.Registerer
client *http.Client
useBasicAuth bool
username, password string
}
// New creates a new Pusher to push to the provided URL with the provided job
// name. You can use just host:port or ip:port as url, in which case “http://”
// is added automatically. Alternatively, include the schema in the
// URL. However, do not include the “/metrics/jobs/…” part.
// //
// You can use just host:port or ip:port as url, in which case 'http://' is // Note that until https://github.com/prometheus/pushgateway/issues/97 is
// added automatically. You can also include the schema in the URL. However, do // resolved, a “/” character in the job name is prohibited.
// not include the '/metrics/jobs/...' part. func New(url, job string) *Pusher {
var (
reg = prometheus.NewRegistry()
err error
)
if !strings.Contains(url, "://") {
url = "http://" + url
}
if strings.HasSuffix(url, "/") {
url = url[:len(url)-1]
}
if strings.Contains(job, "/") {
err = fmt.Errorf("job contains '/': %s", job)
}
return &Pusher{
error: err,
url: url,
job: job,
grouping: map[string]string{},
gatherers: prometheus.Gatherers{reg},
registerer: reg,
client: &http.Client{},
}
}
// Push collects/gathers all metrics from all Collectors and Gatherers added to
// this Pusher. Then, it pushes them to the Pushgateway configured while
// creating this Pusher, using the configured job name and any added grouping
// labels as grouping key. All previously pushed metrics with the same job and
// other grouping labels will be replaced with the metrics pushed by this
// call. (It uses HTTP method “PUT” to push to the Pushgateway.)
// //
// Note that all previously pushed metrics with the same job and other grouping // Push returns the first error encountered by any method call (including this
// labels will be replaced with the metrics pushed by this call. (It uses HTTP // one) in the lifetime of the Pusher.
// method 'PUT' to push to the Pushgateway.) func (p *Pusher) Push() error {
func FromGatherer(job string, grouping map[string]string, url string, g prometheus.Gatherer) error { return p.push("PUT")
return push(job, grouping, url, g, "PUT")
} }
// AddFromGatherer works like FromGatherer, but only previously pushed metrics // Add works like push, but only previously pushed metrics with the same name
// with the same name (and the same job and other grouping labels) will be // (and the same job and other grouping labels) will be replaced. (It uses HTTP
// replaced. (It uses HTTP method 'POST' to push to the Pushgateway.) // method “POST” to push to the Pushgateway.)
func AddFromGatherer(job string, grouping map[string]string, url string, g prometheus.Gatherer) error { func (p *Pusher) Add() error {
return push(job, grouping, url, g, "POST") return p.push("POST")
} }
func push(job string, grouping map[string]string, pushURL string, g prometheus.Gatherer, method string) error { // Gatherer adds a Gatherer to the Pusher, from which metrics will be gathered
if !strings.Contains(pushURL, "://") { // to push them to the Pushgateway. The gathered metrics must not contain a job
pushURL = "http://" + pushURL // label of their own.
} //
if strings.HasSuffix(pushURL, "/") { // For convenience, this method returns a pointer to the Pusher itself.
pushURL = pushURL[:len(pushURL)-1] func (p *Pusher) Gatherer(g prometheus.Gatherer) *Pusher {
} p.gatherers = append(p.gatherers, g)
return p
}
if strings.Contains(job, "/") { // Collector adds a Collector to the Pusher, from which metrics will be
return fmt.Errorf("job contains '/': %s", job) // collected to push them to the Pushgateway. The collected metrics must not
// contain a job label of their own.
//
// For convenience, this method returns a pointer to the Pusher itself.
func (p *Pusher) Collector(c prometheus.Collector) *Pusher {
if p.error == nil {
p.error = p.registerer.Register(c)
} }
urlComponents := []string{url.QueryEscape(job)} return p
for ln, lv := range grouping { }
if !model.LabelNameRE.MatchString(ln) {
return fmt.Errorf("grouping label has invalid name: %s", ln) // Grouping adds a label pair to the grouping key of the Pusher, replacing any
// previously added label pair with the same label name. Note that setting any
// labels in the grouping key that are already contained in the metrics to push
// will lead to an error.
//
// For convenience, this method returns a pointer to the Pusher itself.
//
// Note that until https://github.com/prometheus/pushgateway/issues/97 is
// resolved, this method does not allow a “/” character in the label value.
func (p *Pusher) Grouping(name, value string) *Pusher {
if p.error == nil {
if !model.LabelName(name).IsValid() {
p.error = fmt.Errorf("grouping label has invalid name: %s", name)
return p
} }
if strings.Contains(lv, "/") { if strings.Contains(value, "/") {
return fmt.Errorf("value of grouping label %s contains '/': %s", ln, lv) p.error = fmt.Errorf("value of grouping label %s contains '/': %s", name, value)
return p
} }
p.grouping[name] = value
}
return p
}
// Client sets a custom HTTP client for the Pusher. For convenience, this method
// returns a pointer to the Pusher itself.
func (p *Pusher) Client(c *http.Client) *Pusher {
p.client = c
return p
}
// BasicAuth configures the Pusher to use HTTP Basic Authentication with the
// provided username and password. For convenience, this method returns a
// pointer to the Pusher itself.
func (p *Pusher) BasicAuth(username, password string) *Pusher {
p.useBasicAuth = true
p.username = username
p.password = password
return p
}
func (p *Pusher) push(method string) error {
if p.error != nil {
return p.error
}
urlComponents := []string{url.QueryEscape(p.job)}
for ln, lv := range p.grouping {
urlComponents = append(urlComponents, ln, lv) urlComponents = append(urlComponents, ln, lv)
} }
pushURL = fmt.Sprintf("%s/metrics/job/%s", pushURL, strings.Join(urlComponents, "/")) pushURL := fmt.Sprintf("%s/metrics/job/%s", p.url, strings.Join(urlComponents, "/"))
mfs, err := g.Gather() mfs, err := p.gatherers.Gather()
if err != nil { if err != nil {
return err return err
} }
@ -107,7 +205,7 @@ func push(job string, grouping map[string]string, pushURL string, g prometheus.G
if l.GetName() == "job" { if l.GetName() == "job" {
return fmt.Errorf("pushed metric %s (%s) already contains a job label", mf.GetName(), m) return fmt.Errorf("pushed metric %s (%s) already contains a job label", mf.GetName(), m)
} }
if _, ok := grouping[l.GetName()]; ok { if _, ok := p.grouping[l.GetName()]; ok {
return fmt.Errorf( return fmt.Errorf(
"pushed metric %s (%s) already contains grouping label %s", "pushed metric %s (%s) already contains grouping label %s",
mf.GetName(), m, l.GetName(), mf.GetName(), m, l.GetName(),
@ -121,8 +219,11 @@ func push(job string, grouping map[string]string, pushURL string, g prometheus.G
if err != nil { if err != nil {
return err return err
} }
if p.useBasicAuth {
req.SetBasicAuth(p.username, p.password)
}
req.Header.Set(contentTypeHeader, string(expfmt.FmtProtoDelim)) req.Header.Set(contentTypeHeader, string(expfmt.FmtProtoDelim))
resp, err := http.DefaultClient.Do(req) resp, err := p.client.Do(req)
if err != nil { if err != nil {
return err return err
} }
@ -133,40 +234,3 @@ func push(job string, grouping map[string]string, pushURL string, g prometheus.G
} }
return nil return nil
} }
// Collectors works like FromGatherer, but it does not use a Gatherer. Instead,
// it collects from the provided collectors directly. It is a convenient way to
// push only a few metrics.
func Collectors(job string, grouping map[string]string, url string, collectors ...prometheus.Collector) error {
return pushCollectors(job, grouping, url, "PUT", collectors...)
}
// AddCollectors works like AddFromGatherer, but it does not use a Gatherer.
// Instead, it collects from the provided collectors directly. It is a
// convenient way to push only a few metrics.
func AddCollectors(job string, grouping map[string]string, url string, collectors ...prometheus.Collector) error {
return pushCollectors(job, grouping, url, "POST", collectors...)
}
func pushCollectors(job string, grouping map[string]string, url, method string, collectors ...prometheus.Collector) error {
r := prometheus.NewRegistry()
for _, collector := range collectors {
if err := r.Register(collector); err != nil {
return err
}
}
return push(job, grouping, url, r, method)
}
// HostnameGroupingKey returns a label map with the only entry
// {instance="<hostname>"}. This can be conveniently used as the grouping
// parameter if metrics should be pushed with the hostname as label. The
// returned map is created upon each call so that the caller is free to add more
// labels to the map.
func HostnameGroupingKey() map[string]string {
hostname, err := os.Hostname()
if err != nil {
return map[string]string{"instance": "unknown"}
}
return map[string]string{"instance": hostname}
}

@ -11,12 +11,6 @@
// See the License for the specific language governing permissions and // See the License for the specific language governing permissions and
// limitations under the License. // limitations under the License.
// Copyright (c) 2013, The Prometheus Authors
// All rights reserved.
//
// Use of this source code is governed by a BSD-style license that can be found
// in the LICENSE file.
package push package push
import ( import (
@ -24,7 +18,6 @@ import (
"io/ioutil" "io/ioutil"
"net/http" "net/http"
"net/http/httptest" "net/http/httptest"
"os"
"testing" "testing"
"github.com/prometheus/common/expfmt" "github.com/prometheus/common/expfmt"
@ -40,11 +33,6 @@ func TestPush(t *testing.T) {
lastPath string lastPath string
) )
host, err := os.Hostname()
if err != nil {
t.Error(err)
}
// Fake a Pushgateway that always responds with 202. // Fake a Pushgateway that always responds with 202.
pgwOK := httptest.NewServer( pgwOK := httptest.NewServer(
http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
@ -98,26 +86,32 @@ func TestPush(t *testing.T) {
} }
wantBody := buf.Bytes() wantBody := buf.Bytes()
// PushCollectors, all good. // Push some Collectors, all good.
if err := Collectors("testjob", HostnameGroupingKey(), pgwOK.URL, metric1, metric2); err != nil { if err := New(pgwOK.URL, "testjob").
Collector(metric1).
Collector(metric2).
Push(); err != nil {
t.Fatal(err) t.Fatal(err)
} }
if lastMethod != "PUT" { if lastMethod != "PUT" {
t.Error("want method PUT for PushCollectors, got", lastMethod) t.Error("want method PUT for Push, got", lastMethod)
} }
if bytes.Compare(lastBody, wantBody) != 0 { if bytes.Compare(lastBody, wantBody) != 0 {
t.Errorf("got body %v, want %v", lastBody, wantBody) t.Errorf("got body %v, want %v", lastBody, wantBody)
} }
if lastPath != "/metrics/job/testjob/instance/"+host { if lastPath != "/metrics/job/testjob" {
t.Error("unexpected path:", lastPath) t.Error("unexpected path:", lastPath)
} }
// PushAddCollectors, with nil grouping, all good. // Add some Collectors, with nil grouping, all good.
if err := AddCollectors("testjob", nil, pgwOK.URL, metric1, metric2); err != nil { if err := New(pgwOK.URL, "testjob").
Collector(metric1).
Collector(metric2).
Add(); err != nil {
t.Fatal(err) t.Fatal(err)
} }
if lastMethod != "POST" { if lastMethod != "POST" {
t.Error("want method POST for PushAddCollectors, got", lastMethod) t.Error("want method POST for Add, got", lastMethod)
} }
if bytes.Compare(lastBody, wantBody) != 0 { if bytes.Compare(lastBody, wantBody) != 0 {
t.Errorf("got body %v, want %v", lastBody, wantBody) t.Errorf("got body %v, want %v", lastBody, wantBody)
@ -126,8 +120,11 @@ func TestPush(t *testing.T) {
t.Error("unexpected path:", lastPath) t.Error("unexpected path:", lastPath)
} }
// PushCollectors with a broken PGW. // Push some Collectors with a broken PGW.
if err := Collectors("testjob", nil, pgwErr.URL, metric1, metric2); err == nil { if err := New(pgwErr.URL, "testjob").
Collector(metric1).
Collector(metric2).
Push(); err == nil {
t.Error("push to broken Pushgateway succeeded") t.Error("push to broken Pushgateway succeeded")
} else { } else {
if got, want := err.Error(), "unexpected status code 500 while pushing to "+pgwErr.URL+"/metrics/job/testjob: fake error\n"; got != want { if got, want := err.Error(), "unexpected status code 500 while pushing to "+pgwErr.URL+"/metrics/job/testjob: fake error\n"; got != want {
@ -135,22 +132,39 @@ func TestPush(t *testing.T) {
} }
} }
// PushCollectors with invalid grouping or job. // Push some Collectors with invalid grouping or job.
if err := Collectors("testjob", map[string]string{"foo": "bums"}, pgwErr.URL, metric1, metric2); err == nil { if err := New(pgwOK.URL, "testjob").
Grouping("foo", "bums").
Collector(metric1).
Collector(metric2).
Push(); err == nil {
t.Error("push with grouping contained in metrics succeeded") t.Error("push with grouping contained in metrics succeeded")
} }
if err := Collectors("test/job", nil, pgwErr.URL, metric1, metric2); err == nil { if err := New(pgwOK.URL, "test/job").
Collector(metric1).
Collector(metric2).
Push(); err == nil {
t.Error("push with invalid job value succeeded") t.Error("push with invalid job value succeeded")
} }
if err := Collectors("testjob", map[string]string{"foo/bar": "bums"}, pgwErr.URL, metric1, metric2); err == nil { if err := New(pgwOK.URL, "testjob").
Grouping("foobar", "bu/ms").
Collector(metric1).
Collector(metric2).
Push(); err == nil {
t.Error("push with invalid grouping succeeded") t.Error("push with invalid grouping succeeded")
} }
if err := Collectors("testjob", map[string]string{"foo-bar": "bums"}, pgwErr.URL, metric1, metric2); err == nil { if err := New(pgwOK.URL, "testjob").
Grouping("foo-bar", "bums").
Collector(metric1).
Collector(metric2).
Push(); err == nil {
t.Error("push with invalid grouping succeeded") t.Error("push with invalid grouping succeeded")
} }
// Push registry, all good. // Push registry, all good.
if err := FromGatherer("testjob", HostnameGroupingKey(), pgwOK.URL, reg); err != nil { if err := New(pgwOK.URL, "testjob").
Gatherer(reg).
Push(); err != nil {
t.Fatal(err) t.Fatal(err)
} }
if lastMethod != "PUT" { if lastMethod != "PUT" {
@ -160,12 +174,16 @@ func TestPush(t *testing.T) {
t.Errorf("got body %v, want %v", lastBody, wantBody) t.Errorf("got body %v, want %v", lastBody, wantBody)
} }
// PushAdd registry, all good. // Add registry, all good.
if err := AddFromGatherer("testjob", map[string]string{"a": "x", "b": "y"}, pgwOK.URL, reg); err != nil { if err := New(pgwOK.URL, "testjob").
Grouping("a", "x").
Grouping("b", "y").
Gatherer(reg).
Add(); err != nil {
t.Fatal(err) t.Fatal(err)
} }
if lastMethod != "POST" { if lastMethod != "POST" {
t.Error("want method POSTT for PushAdd, got", lastMethod) t.Error("want method POST for Add, got", lastMethod)
} }
if bytes.Compare(lastBody, wantBody) != 0 { if bytes.Compare(lastBody, wantBody) != 0 {
t.Errorf("got body %v, want %v", lastBody, wantBody) t.Errorf("got body %v, want %v", lastBody, wantBody)

@ -15,15 +15,18 @@ package prometheus
import ( import (
"bytes" "bytes"
"errors"
"fmt" "fmt"
"os" "runtime"
"sort" "sort"
"strings"
"sync" "sync"
"unicode/utf8"
"github.com/golang/protobuf/proto" "github.com/golang/protobuf/proto"
dto "github.com/prometheus/client_model/go" dto "github.com/prometheus/client_model/go"
"github.com/prometheus/client_golang/prometheus/internal"
) )
const ( const (
@ -35,13 +38,14 @@ const (
// DefaultRegisterer and DefaultGatherer are the implementations of the // DefaultRegisterer and DefaultGatherer are the implementations of the
// Registerer and Gatherer interface a number of convenience functions in this // Registerer and Gatherer interface a number of convenience functions in this
// package act on. Initially, both variables point to the same Registry, which // package act on. Initially, both variables point to the same Registry, which
// has a process collector (see NewProcessCollector) and a Go collector (see // has a process collector (currently on Linux only, see NewProcessCollector)
// NewGoCollector) already registered. This approach to keep default instances // and a Go collector (see NewGoCollector, in particular the note about
// as global state mirrors the approach of other packages in the Go standard // stop-the-world implication with Go versions older than 1.9) already
// library. Note that there are caveats. Change the variables with caution and // registered. This approach to keep default instances as global state mirrors
// only if you understand the consequences. Users who want to avoid global state // the approach of other packages in the Go standard library. Note that there
// altogether should not use the convenience function and act on custom // are caveats. Change the variables with caution and only if you understand the
// instances instead. // consequences. Users who want to avoid global state altogether should not use
// the convenience functions and act on custom instances instead.
var ( var (
defaultRegistry = NewRegistry() defaultRegistry = NewRegistry()
DefaultRegisterer Registerer = defaultRegistry DefaultRegisterer Registerer = defaultRegistry
@ -49,7 +53,7 @@ var (
) )
func init() { func init() {
MustRegister(NewProcessCollector(os.Getpid(), "")) MustRegister(NewProcessCollector(ProcessCollectorOpts{}))
MustRegister(NewGoCollector()) MustRegister(NewGoCollector())
} }
@ -65,7 +69,8 @@ func NewRegistry() *Registry {
// NewPedanticRegistry returns a registry that checks during collection if each // NewPedanticRegistry returns a registry that checks during collection if each
// collected Metric is consistent with its reported Desc, and if the Desc has // collected Metric is consistent with its reported Desc, and if the Desc has
// actually been registered with the registry. // actually been registered with the registry. Unchecked Collectors (those whose
// Describe method does not yield any descriptors) are excluded from the check.
// //
// Usually, a Registry will be happy as long as the union of all collected // Usually, a Registry will be happy as long as the union of all collected
// Metrics is consistent and valid even if some metrics are not consistent with // Metrics is consistent and valid even if some metrics are not consistent with
@ -80,7 +85,7 @@ func NewPedanticRegistry() *Registry {
// Registerer is the interface for the part of a registry in charge of // Registerer is the interface for the part of a registry in charge of
// registering and unregistering. Users of custom registries should use // registering and unregistering. Users of custom registries should use
// Registerer as type for registration purposes (rather then the Registry type // Registerer as type for registration purposes (rather than the Registry type
// directly). In that way, they are free to use custom Registerer implementation // directly). In that way, they are free to use custom Registerer implementation
// (e.g. for testing purposes). // (e.g. for testing purposes).
type Registerer interface { type Registerer interface {
@ -95,8 +100,13 @@ type Registerer interface {
// returned error is an instance of AlreadyRegisteredError, which // returned error is an instance of AlreadyRegisteredError, which
// contains the previously registered Collector. // contains the previously registered Collector.
// //
// It is in general not safe to register the same Collector multiple // A Collector whose Describe method does not yield any Desc is treated
// times concurrently. // as unchecked. Registration will always succeed. No check for
// re-registering (see previous paragraph) is performed. Thus, the
// caller is responsible for not double-registering the same unchecked
// Collector, and for providing a Collector that will not cause
// inconsistent metrics on collection. (This would lead to scrape
// errors.)
Register(Collector) error Register(Collector) error
// MustRegister works like Register but registers any number of // MustRegister works like Register but registers any number of
// Collectors and panics upon the first registration that causes an // Collectors and panics upon the first registration that causes an
@ -105,7 +115,9 @@ type Registerer interface {
// Unregister unregisters the Collector that equals the Collector passed // Unregister unregisters the Collector that equals the Collector passed
// in as an argument. (Two Collectors are considered equal if their // in as an argument. (Two Collectors are considered equal if their
// Describe method yields the same set of descriptors.) The function // Describe method yields the same set of descriptors.) The function
// returns whether a Collector was unregistered. // returns whether a Collector was unregistered. Note that an unchecked
// Collector cannot be unregistered (as its Describe method does not
// yield any descriptor).
// //
// Note that even after unregistering, it will not be possible to // Note that even after unregistering, it will not be possible to
// register a new Collector that is inconsistent with the unregistered // register a new Collector that is inconsistent with the unregistered
@ -123,15 +135,23 @@ type Registerer interface {
type Gatherer interface { type Gatherer interface {
// Gather calls the Collect method of the registered Collectors and then // Gather calls the Collect method of the registered Collectors and then
// gathers the collected metrics into a lexicographically sorted slice // gathers the collected metrics into a lexicographically sorted slice
// of MetricFamily protobufs. Even if an error occurs, Gather attempts // of uniquely named MetricFamily protobufs. Gather ensures that the
// to gather as many metrics as possible. Hence, if a non-nil error is // returned slice is valid and self-consistent so that it can be used
// returned, the returned MetricFamily slice could be nil (in case of a // for valid exposition. As an exception to the strict consistency
// fatal error that prevented any meaningful metric collection) or // requirements described for metric.Desc, Gather will tolerate
// contain a number of MetricFamily protobufs, some of which might be // different sets of label names for metrics of the same metric family.
// incomplete, and some might be missing altogether. The returned error //
// (which might be a MultiError) explains the details. In scenarios // Even if an error occurs, Gather attempts to gather as many metrics as
// where complete collection is critical, the returned MetricFamily // possible. Hence, if a non-nil error is returned, the returned
// protobufs should be disregarded if the returned error is non-nil. // MetricFamily slice could be nil (in case of a fatal error that
// prevented any meaningful metric collection) or contain a number of
// MetricFamily protobufs, some of which might be incomplete, and some
// might be missing altogether. The returned error (which might be a
// MultiError) explains the details. Note that this is mostly useful for
// debugging purposes. If the gathered protobufs are to be used for
// exposition in actual monitoring, it is almost always better to not
// expose an incomplete result and instead disregard the returned
// MetricFamily protobufs in case the returned error is non-nil.
Gather() ([]*dto.MetricFamily, error) Gather() ([]*dto.MetricFamily, error)
} }
@ -152,38 +172,6 @@ func MustRegister(cs ...Collector) {
DefaultRegisterer.MustRegister(cs...) DefaultRegisterer.MustRegister(cs...)
} }
// RegisterOrGet registers the provided Collector with the DefaultRegisterer and
// returns the Collector, unless an equal Collector was registered before, in
// which case that Collector is returned.
//
// Deprecated: RegisterOrGet is merely a convenience function for the
// implementation as described in the documentation for
// AlreadyRegisteredError. As the use case is relatively rare, this function
// will be removed in a future version of this package to clean up the
// namespace.
func RegisterOrGet(c Collector) (Collector, error) {
if err := Register(c); err != nil {
if are, ok := err.(AlreadyRegisteredError); ok {
return are.ExistingCollector, nil
}
return nil, err
}
return c, nil
}
// MustRegisterOrGet behaves like RegisterOrGet but panics instead of returning
// an error.
//
// Deprecated: This is deprecated for the same reason RegisterOrGet is. See
// there for details.
func MustRegisterOrGet(c Collector) Collector {
c, err := RegisterOrGet(c)
if err != nil {
panic(err)
}
return c
}
// Unregister removes the registration of the provided Collector from the // Unregister removes the registration of the provided Collector from the
// DefaultRegisterer. // DefaultRegisterer.
// //
@ -201,25 +189,6 @@ func (gf GathererFunc) Gather() ([]*dto.MetricFamily, error) {
return gf() return gf()
} }
// SetMetricFamilyInjectionHook replaces the DefaultGatherer with one that
// gathers from the previous DefaultGatherers but then merges the MetricFamily
// protobufs returned from the provided hook function with the MetricFamily
// protobufs returned from the original DefaultGatherer.
//
// Deprecated: This function manipulates the DefaultGatherer variable. Consider
// the implications, i.e. don't do this concurrently with any uses of the
// DefaultGatherer. In the rare cases where you need to inject MetricFamily
// protobufs directly, it is recommended to use a custom Registry and combine it
// with a custom Gatherer using the Gatherers type (see
// there). SetMetricFamilyInjectionHook only exists for compatibility reasons
// with previous versions of this package.
func SetMetricFamilyInjectionHook(hook func() []*dto.MetricFamily) {
DefaultGatherer = Gatherers{
DefaultGatherer,
GathererFunc(func() ([]*dto.MetricFamily, error) { return hook(), nil }),
}
}
// AlreadyRegisteredError is returned by the Register method if the Collector to // AlreadyRegisteredError is returned by the Register method if the Collector to
// be registered has already been registered before, or a different Collector // be registered has already been registered before, or a different Collector
// that collects the same metrics has been registered before. Registration fails // that collects the same metrics has been registered before. Registration fails
@ -252,6 +221,13 @@ func (errs MultiError) Error() string {
return buf.String() return buf.String()
} }
// Append appends the provided error if it is not nil.
func (errs *MultiError) Append(err error) {
if err != nil {
*errs = append(*errs, err)
}
}
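// A short sketch of how Append (above) and MaybeUnwrap (below) are meant to
// be used together when accumulating errors; gatherOne, gatherTwo and result
// are illustrative:
//
//	var errs MultiError
//	errs.Append(gatherOne()) // nil errors are silently dropped
//	errs.Append(gatherTwo())
//	return result, errs.MaybeUnwrap() // nil, the single error, or the MultiError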
// MaybeUnwrap returns nil if len(errs) is 0. It returns the first and only // MaybeUnwrap returns nil if len(errs) is 0. It returns the first and only
// contained error as error if len(errs is 1). In all other cases, it returns // contained error as error if len(errs is 1). In all other cases, it returns
// the MultiError directly. This is helpful for returning a MultiError in a way // the MultiError directly. This is helpful for returning a MultiError in a way
@ -276,6 +252,7 @@ type Registry struct {
collectorsByID map[uint64]Collector // ID is a hash of the descIDs. collectorsByID map[uint64]Collector // ID is a hash of the descIDs.
descIDs map[uint64]struct{} descIDs map[uint64]struct{}
dimHashesByName map[string]uint64 dimHashesByName map[string]uint64
uncheckedCollectors []Collector
pedanticChecksEnabled bool pedanticChecksEnabled bool
} }
@ -293,8 +270,13 @@ func (r *Registry) Register(c Collector) error {
close(descChan) close(descChan)
}() }()
r.mtx.Lock() r.mtx.Lock()
defer r.mtx.Unlock() defer func() {
// Coduct various tests... // Drain channel in case of premature return to not leak a goroutine.
for range descChan {
}
r.mtx.Unlock()
}()
// Conduct various tests...
for desc := range descChan { for desc := range descChan {
// Is the descriptor valid at all? // Is the descriptor valid at all?
@ -333,9 +315,10 @@ func (r *Registry) Register(c Collector) error {
} }
} }
} }
// Did anything happen at all? // A Collector yielding no Desc at all is considered unchecked.
if len(newDescIDs) == 0 { if len(newDescIDs) == 0 {
return errors.New("collector has no descriptors") r.uncheckedCollectors = append(r.uncheckedCollectors, c)
return nil
} }
if existing, exists := r.collectorsByID[collectorID]; exists { if existing, exists := r.collectorsByID[collectorID]; exists {
return AlreadyRegisteredError{ return AlreadyRegisteredError{
@ -409,31 +392,25 @@ func (r *Registry) MustRegister(cs ...Collector) {
// Gather implements Gatherer. // Gather implements Gatherer.
func (r *Registry) Gather() ([]*dto.MetricFamily, error) { func (r *Registry) Gather() ([]*dto.MetricFamily, error) {
var ( var (
metricChan = make(chan Metric, capMetricChan) checkedMetricChan = make(chan Metric, capMetricChan)
metricHashes = map[uint64]struct{}{} uncheckedMetricChan = make(chan Metric, capMetricChan)
dimHashes = map[string]uint64{} metricHashes = map[uint64]struct{}{}
wg sync.WaitGroup wg sync.WaitGroup
errs MultiError // The collected errors to return in the end. errs MultiError // The collected errors to return in the end.
registeredDescIDs map[uint64]struct{} // Only used for pedantic checks registeredDescIDs map[uint64]struct{} // Only used for pedantic checks
) )
r.mtx.RLock() r.mtx.RLock()
goroutineBudget := len(r.collectorsByID) + len(r.uncheckedCollectors)
metricFamiliesByName := make(map[string]*dto.MetricFamily, len(r.dimHashesByName)) metricFamiliesByName := make(map[string]*dto.MetricFamily, len(r.dimHashesByName))
checkedCollectors := make(chan Collector, len(r.collectorsByID))
// Scatter. uncheckedCollectors := make(chan Collector, len(r.uncheckedCollectors))
// (Collectors could be complex and slow, so we call them all at once.)
wg.Add(len(r.collectorsByID))
go func() {
wg.Wait()
close(metricChan)
}()
for _, collector := range r.collectorsByID { for _, collector := range r.collectorsByID {
go func(collector Collector) { checkedCollectors <- collector
defer wg.Done() }
collector.Collect(metricChan) for _, collector := range r.uncheckedCollectors {
}(collector) uncheckedCollectors <- collector
} }
// In case pedantic checks are enabled, we have to copy the map before // In case pedantic checks are enabled, we have to copy the map before
// giving up the RLock. // giving up the RLock.
if r.pedanticChecksEnabled { if r.pedanticChecksEnabled {
@ -442,127 +419,226 @@ func (r *Registry) Gather() ([]*dto.MetricFamily, error) {
registeredDescIDs[id] = struct{}{} registeredDescIDs[id] = struct{}{}
} }
} }
r.mtx.RUnlock() r.mtx.RUnlock()
// Drain metricChan in case of premature return. wg.Add(goroutineBudget)
collectWorker := func() {
for {
select {
case collector := <-checkedCollectors:
collector.Collect(checkedMetricChan)
case collector := <-uncheckedCollectors:
collector.Collect(uncheckedMetricChan)
default:
return
}
wg.Done()
}
}
// Start the first worker now to make sure at least one is running.
go collectWorker()
goroutineBudget--
// Close checkedMetricChan and uncheckedMetricChan once all collectors
// are collected.
go func() {
wg.Wait()
close(checkedMetricChan)
close(uncheckedMetricChan)
}()
// Drain checkedMetricChan and uncheckedMetricChan in case of premature return.
defer func() { defer func() {
for _ = range metricChan { if checkedMetricChan != nil {
for range checkedMetricChan {
}
}
if uncheckedMetricChan != nil {
for range uncheckedMetricChan {
}
} }
}() }()
// Gather. // Copy the channel references so we can nil them out later to remove
for metric := range metricChan { // them from the select statements below.
// This could be done concurrently, too, but it required locking cmc := checkedMetricChan
// of metricFamiliesByName (and of metricHashes if checks are umc := uncheckedMetricChan
// enabled). Most likely not worth it.
desc := metric.Desc() for {
dtoMetric := &dto.Metric{} select {
if err := metric.Write(dtoMetric); err != nil { case metric, ok := <-cmc:
errs = append(errs, fmt.Errorf( if !ok {
"error collecting metric %v: %s", desc, err, cmc = nil
break
}
errs.Append(processMetric(
metric, metricFamiliesByName,
metricHashes,
registeredDescIDs,
)) ))
continue case metric, ok := <-umc:
} if !ok {
metricFamily, ok := metricFamiliesByName[desc.fqName] umc = nil
if ok { break
if metricFamily.GetHelp() != desc.help {
errs = append(errs, fmt.Errorf(
"collected metric %s %s has help %q but should have %q",
desc.fqName, dtoMetric, desc.help, metricFamily.GetHelp(),
))
continue
} }
// TODO(beorn7): Simplify switch once Desc has type. errs.Append(processMetric(
switch metricFamily.GetType() { metric, metricFamiliesByName,
case dto.MetricType_COUNTER: metricHashes,
if dtoMetric.Counter == nil { nil,
errs = append(errs, fmt.Errorf( ))
"collected metric %s %s should be a Counter", default:
desc.fqName, dtoMetric, if goroutineBudget <= 0 || len(checkedCollectors)+len(uncheckedCollectors) == 0 {
)) // All collectors are already being worked on or
continue // we have already as many goroutines started as
} // there are collectors. Do the same as above,
case dto.MetricType_GAUGE: // just without the default.
if dtoMetric.Gauge == nil { select {
errs = append(errs, fmt.Errorf( case metric, ok := <-cmc:
"collected metric %s %s should be a Gauge", if !ok {
desc.fqName, dtoMetric, cmc = nil
)) break
continue }
} errs.Append(processMetric(
case dto.MetricType_SUMMARY: metric, metricFamiliesByName,
if dtoMetric.Summary == nil { metricHashes,
errs = append(errs, fmt.Errorf( registeredDescIDs,
"collected metric %s %s should be a Summary",
desc.fqName, dtoMetric,
)) ))
continue case metric, ok := <-umc:
} if !ok {
case dto.MetricType_UNTYPED: umc = nil
if dtoMetric.Untyped == nil { break
errs = append(errs, fmt.Errorf( }
"collected metric %s %s should be Untyped", errs.Append(processMetric(
desc.fqName, dtoMetric, metric, metricFamiliesByName,
metricHashes,
nil,
)) ))
continue
} }
case dto.MetricType_HISTOGRAM: break
if dtoMetric.Histogram == nil {
errs = append(errs, fmt.Errorf(
"collected metric %s %s should be a Histogram",
desc.fqName, dtoMetric,
))
continue
}
default:
panic("encountered MetricFamily with invalid type")
} }
} else { // Start more workers.
metricFamily = &dto.MetricFamily{} go collectWorker()
metricFamily.Name = proto.String(desc.fqName) goroutineBudget--
metricFamily.Help = proto.String(desc.help) runtime.Gosched()
// TODO(beorn7): Simplify switch once Desc has type. }
switch { // Once both checkedMetricChan and uncheckedMetricChan are closed
case dtoMetric.Gauge != nil: // and drained, the contraption above will nil out cmc and umc,
metricFamily.Type = dto.MetricType_GAUGE.Enum() // and then we can leave the collect loop here.
case dtoMetric.Counter != nil: if cmc == nil && umc == nil {
metricFamily.Type = dto.MetricType_COUNTER.Enum() break
case dtoMetric.Summary != nil:
metricFamily.Type = dto.MetricType_SUMMARY.Enum()
case dtoMetric.Untyped != nil:
metricFamily.Type = dto.MetricType_UNTYPED.Enum()
case dtoMetric.Histogram != nil:
metricFamily.Type = dto.MetricType_HISTOGRAM.Enum()
default:
errs = append(errs, fmt.Errorf(
"empty metric collected: %s", dtoMetric,
))
continue
}
metricFamiliesByName[desc.fqName] = metricFamily
} }
if err := checkMetricConsistency(metricFamily, dtoMetric, metricHashes, dimHashes); err != nil { }
errs = append(errs, err) return internal.NormalizeMetricFamilies(metricFamiliesByName), errs.MaybeUnwrap()
continue }
// processMetric is an internal helper method only used by the Gather method.
func processMetric(
metric Metric,
metricFamiliesByName map[string]*dto.MetricFamily,
metricHashes map[uint64]struct{},
registeredDescIDs map[uint64]struct{},
) error {
desc := metric.Desc()
// Wrapped metrics collected by an unchecked Collector can have an
// invalid Desc.
if desc.err != nil {
return desc.err
}
dtoMetric := &dto.Metric{}
if err := metric.Write(dtoMetric); err != nil {
return fmt.Errorf("error collecting metric %v: %s", desc, err)
}
metricFamily, ok := metricFamiliesByName[desc.fqName]
if ok { // Existing name.
if metricFamily.GetHelp() != desc.help {
return fmt.Errorf(
"collected metric %s %s has help %q but should have %q",
desc.fqName, dtoMetric, desc.help, metricFamily.GetHelp(),
)
} }
if r.pedanticChecksEnabled { // TODO(beorn7): Simplify switch once Desc has type.
// Is the desc registered at all? switch metricFamily.GetType() {
if _, exist := registeredDescIDs[desc.id]; !exist { case dto.MetricType_COUNTER:
errs = append(errs, fmt.Errorf( if dtoMetric.Counter == nil {
"collected metric %s %s with unregistered descriptor %s", return fmt.Errorf(
metricFamily.GetName(), dtoMetric, desc, "collected metric %s %s should be a Counter",
)) desc.fqName, dtoMetric,
continue )
} }
if err := checkDescConsistency(metricFamily, dtoMetric, desc); err != nil { case dto.MetricType_GAUGE:
errs = append(errs, err) if dtoMetric.Gauge == nil {
continue return fmt.Errorf(
"collected metric %s %s should be a Gauge",
desc.fqName, dtoMetric,
)
} }
case dto.MetricType_SUMMARY:
if dtoMetric.Summary == nil {
return fmt.Errorf(
"collected metric %s %s should be a Summary",
desc.fqName, dtoMetric,
)
}
case dto.MetricType_UNTYPED:
if dtoMetric.Untyped == nil {
return fmt.Errorf(
"collected metric %s %s should be Untyped",
desc.fqName, dtoMetric,
)
}
case dto.MetricType_HISTOGRAM:
if dtoMetric.Histogram == nil {
return fmt.Errorf(
"collected metric %s %s should be a Histogram",
desc.fqName, dtoMetric,
)
}
default:
panic("encountered MetricFamily with invalid type")
}
} else { // New name.
metricFamily = &dto.MetricFamily{}
metricFamily.Name = proto.String(desc.fqName)
metricFamily.Help = proto.String(desc.help)
// TODO(beorn7): Simplify switch once Desc has type.
switch {
case dtoMetric.Gauge != nil:
metricFamily.Type = dto.MetricType_GAUGE.Enum()
case dtoMetric.Counter != nil:
metricFamily.Type = dto.MetricType_COUNTER.Enum()
case dtoMetric.Summary != nil:
metricFamily.Type = dto.MetricType_SUMMARY.Enum()
case dtoMetric.Untyped != nil:
metricFamily.Type = dto.MetricType_UNTYPED.Enum()
case dtoMetric.Histogram != nil:
metricFamily.Type = dto.MetricType_HISTOGRAM.Enum()
default:
return fmt.Errorf("empty metric collected: %s", dtoMetric)
}
if err := checkSuffixCollisions(metricFamily, metricFamiliesByName); err != nil {
return err
} }
metricFamily.Metric = append(metricFamily.Metric, dtoMetric) metricFamiliesByName[desc.fqName] = metricFamily
} }
return normalizeMetricFamilies(metricFamiliesByName), errs.MaybeUnwrap() if err := checkMetricConsistency(metricFamily, dtoMetric, metricHashes); err != nil {
return err
}
if registeredDescIDs != nil {
// Is the desc registered at all?
if _, exist := registeredDescIDs[desc.id]; !exist {
return fmt.Errorf(
"collected metric %s %s with unregistered descriptor %s",
metricFamily.GetName(), dtoMetric, desc,
)
}
if err := checkDescConsistency(metricFamily, dtoMetric, desc); err != nil {
return err
}
}
metricFamily.Metric = append(metricFamily.Metric, dtoMetric)
return nil
} }
// Gatherers is a slice of Gatherer instances that implements the Gatherer // Gatherers is a slice of Gatherer instances that implements the Gatherer
@ -588,7 +664,6 @@ func (gs Gatherers) Gather() ([]*dto.MetricFamily, error) {
var ( var (
metricFamiliesByName = map[string]*dto.MetricFamily{} metricFamiliesByName = map[string]*dto.MetricFamily{}
metricHashes = map[uint64]struct{}{} metricHashes = map[uint64]struct{}{}
dimHashes = map[string]uint64{}
errs MultiError // The collected errors to return in the end. errs MultiError // The collected errors to return in the end.
) )
@ -625,10 +700,14 @@ func (gs Gatherers) Gather() ([]*dto.MetricFamily, error) {
existingMF.Name = mf.Name existingMF.Name = mf.Name
existingMF.Help = mf.Help existingMF.Help = mf.Help
existingMF.Type = mf.Type existingMF.Type = mf.Type
if err := checkSuffixCollisions(existingMF, metricFamiliesByName); err != nil {
errs = append(errs, err)
continue
}
metricFamiliesByName[mf.GetName()] = existingMF metricFamiliesByName[mf.GetName()] = existingMF
} }
for _, m := range mf.Metric { for _, m := range mf.Metric {
if err := checkMetricConsistency(existingMF, m, metricHashes, dimHashes); err != nil { if err := checkMetricConsistency(existingMF, m, metricHashes); err != nil {
errs = append(errs, err) errs = append(errs, err)
continue continue
} }
@ -636,88 +715,80 @@ func (gs Gatherers) Gather() ([]*dto.MetricFamily, error) {
} }
} }
} }
return normalizeMetricFamilies(metricFamiliesByName), errs.MaybeUnwrap() return internal.NormalizeMetricFamilies(metricFamiliesByName), errs.MaybeUnwrap()
} }
// metricSorter is a sortable slice of *dto.Metric. // checkSuffixCollisions checks for collisions with the “magic” suffixes the
type metricSorter []*dto.Metric // Prometheus text format and the internal metric representation of the
// Prometheus server add while flattening Summaries and Histograms.
func (s metricSorter) Len() int { func checkSuffixCollisions(mf *dto.MetricFamily, mfs map[string]*dto.MetricFamily) error {
return len(s) var (
} newName = mf.GetName()
newType = mf.GetType()
func (s metricSorter) Swap(i, j int) { newNameWithoutSuffix = ""
s[i], s[j] = s[j], s[i] )
} switch {
case strings.HasSuffix(newName, "_count"):
func (s metricSorter) Less(i, j int) bool { newNameWithoutSuffix = newName[:len(newName)-6]
if len(s[i].Label) != len(s[j].Label) { case strings.HasSuffix(newName, "_sum"):
// This should not happen. The metrics are newNameWithoutSuffix = newName[:len(newName)-4]
// inconsistent. However, we have to deal with the fact, as case strings.HasSuffix(newName, "_bucket"):
// people might use custom collectors or metric family injection newNameWithoutSuffix = newName[:len(newName)-7]
// to create inconsistent metrics. So let's simply compare the }
// number of labels in this case. That will still yield if newNameWithoutSuffix != "" {
// reproducible sorting. if existingMF, ok := mfs[newNameWithoutSuffix]; ok {
return len(s[i].Label) < len(s[j].Label) switch existingMF.GetType() {
} case dto.MetricType_SUMMARY:
for n, lp := range s[i].Label { if !strings.HasSuffix(newName, "_bucket") {
vi := lp.GetValue() return fmt.Errorf(
vj := s[j].Label[n].GetValue() "collected metric named %q collides with previously collected summary named %q",
if vi != vj { newName, newNameWithoutSuffix,
return vi < vj )
}
case dto.MetricType_HISTOGRAM:
return fmt.Errorf(
"collected metric named %q collides with previously collected histogram named %q",
newName, newNameWithoutSuffix,
)
}
} }
} }
if newType == dto.MetricType_SUMMARY || newType == dto.MetricType_HISTOGRAM {
// We should never arrive here. Multiple metrics with the same if _, ok := mfs[newName+"_count"]; ok {
// label set in the same scrape will lead to undefined ingestion return fmt.Errorf(
// behavior. However, as above, we have to provide stable sorting "collected histogram or summary named %q collides with previously collected metric named %q",
// here, even for inconsistent metrics. So sort equal metrics newName, newName+"_count",
// by their timestamp, with missing timestamps (implying "now") )
// coming last. }
if s[i].TimestampMs == nil { if _, ok := mfs[newName+"_sum"]; ok {
return false return fmt.Errorf(
} "collected histogram or summary named %q collides with previously collected metric named %q",
if s[j].TimestampMs == nil { newName, newName+"_sum",
return true )
}
return s[i].GetTimestampMs() < s[j].GetTimestampMs()
}
// normalizeMetricFamilies returns a MetricFamily slice with empty
// MetricFamilies pruned and the remaining MetricFamilies sorted by name within
// the slice, with the contained Metrics sorted within each MetricFamily.
func normalizeMetricFamilies(metricFamiliesByName map[string]*dto.MetricFamily) []*dto.MetricFamily {
for _, mf := range metricFamiliesByName {
sort.Sort(metricSorter(mf.Metric))
}
names := make([]string, 0, len(metricFamiliesByName))
for name, mf := range metricFamiliesByName {
if len(mf.Metric) > 0 {
names = append(names, name)
} }
} }
sort.Strings(names) if newType == dto.MetricType_HISTOGRAM {
result := make([]*dto.MetricFamily, 0, len(names)) if _, ok := mfs[newName+"_bucket"]; ok {
for _, name := range names { return fmt.Errorf(
result = append(result, metricFamiliesByName[name]) "collected histogram named %q collides with previously collected metric named %q",
newName, newName+"_bucket",
)
}
} }
return result return nil
} }
// checkMetricConsistency checks if the provided Metric is consistent with the // checkMetricConsistency checks if the provided Metric is consistent with the
// provided MetricFamily. It also hashed the Metric labels and the MetricFamily // provided MetricFamily. It also hashes the Metric labels and the MetricFamily
// name. If the resulting hash is alread in the provided metricHashes, an error // name. If the resulting hash is already in the provided metricHashes, an error
// is returned. If not, it is added to metricHashes. The provided dimHashes maps // is returned. If not, it is added to metricHashes.
// MetricFamily names to their dimHash (hashed sorted label names). If dimHashes
// doesn't yet contain a hash for the provided MetricFamily, it is
// added. Otherwise, an error is returned if the existing dimHashes in not equal
// the calculated dimHash.
func checkMetricConsistency( func checkMetricConsistency(
metricFamily *dto.MetricFamily, metricFamily *dto.MetricFamily,
dtoMetric *dto.Metric, dtoMetric *dto.Metric,
metricHashes map[uint64]struct{}, metricHashes map[uint64]struct{},
dimHashes map[string]uint64,
) error { ) error {
name := metricFamily.GetName()
// Type consistency with metric family. // Type consistency with metric family.
if metricFamily.GetType() == dto.MetricType_GAUGE && dtoMetric.Gauge == nil || if metricFamily.GetType() == dto.MetricType_GAUGE && dtoMetric.Gauge == nil ||
metricFamily.GetType() == dto.MetricType_COUNTER && dtoMetric.Counter == nil || metricFamily.GetType() == dto.MetricType_COUNTER && dtoMetric.Counter == nil ||
@ -725,41 +796,59 @@ func checkMetricConsistency(
metricFamily.GetType() == dto.MetricType_HISTOGRAM && dtoMetric.Histogram == nil || metricFamily.GetType() == dto.MetricType_HISTOGRAM && dtoMetric.Histogram == nil ||
metricFamily.GetType() == dto.MetricType_UNTYPED && dtoMetric.Untyped == nil { metricFamily.GetType() == dto.MetricType_UNTYPED && dtoMetric.Untyped == nil {
return fmt.Errorf( return fmt.Errorf(
"collected metric %s %s is not a %s", "collected metric %q { %s} is not a %s",
metricFamily.GetName(), dtoMetric, metricFamily.GetType(), name, dtoMetric, metricFamily.GetType(),
) )
} }
// Is the metric unique (i.e. no other metric with the same name and the same label values)? previousLabelName := ""
for _, labelPair := range dtoMetric.GetLabel() {
labelName := labelPair.GetName()
if labelName == previousLabelName {
return fmt.Errorf(
"collected metric %q { %s} has two or more labels with the same name: %s",
name, dtoMetric, labelName,
)
}
if !checkLabelName(labelName) {
return fmt.Errorf(
"collected metric %q { %s} has a label with an invalid name: %s",
name, dtoMetric, labelName,
)
}
if dtoMetric.Summary != nil && labelName == quantileLabel {
return fmt.Errorf(
"collected metric %q { %s} must not have an explicit %q label",
name, dtoMetric, quantileLabel,
)
}
if !utf8.ValidString(labelPair.GetValue()) {
return fmt.Errorf(
"collected metric %q { %s} has a label named %q whose value is not utf8: %#v",
name, dtoMetric, labelName, labelPair.GetValue())
}
previousLabelName = labelName
}
// Is the metric unique (i.e. no other metric with the same name and the same labels)?
h := hashNew() h := hashNew()
h = hashAdd(h, metricFamily.GetName()) h = hashAdd(h, name)
h = hashAddByte(h, separatorByte) h = hashAddByte(h, separatorByte)
dh := hashNew()
// Make sure label pairs are sorted. We depend on it for the consistency // Make sure label pairs are sorted. We depend on it for the consistency
// check. // check.
sort.Sort(LabelPairSorter(dtoMetric.Label)) sort.Sort(labelPairSorter(dtoMetric.Label))
for _, lp := range dtoMetric.Label { for _, lp := range dtoMetric.Label {
h = hashAdd(h, lp.GetName())
h = hashAddByte(h, separatorByte)
h = hashAdd(h, lp.GetValue()) h = hashAdd(h, lp.GetValue())
h = hashAddByte(h, separatorByte) h = hashAddByte(h, separatorByte)
dh = hashAdd(dh, lp.GetName())
dh = hashAddByte(dh, separatorByte)
} }
if _, exists := metricHashes[h]; exists { if _, exists := metricHashes[h]; exists {
return fmt.Errorf( return fmt.Errorf(
"collected metric %s %s was collected before with the same name and label values", "collected metric %q { %s} was collected before with the same name and label values",
metricFamily.GetName(), dtoMetric, name, dtoMetric,
) )
} }
if dimHash, ok := dimHashes[metricFamily.GetName()]; ok {
if dimHash != dh {
return fmt.Errorf(
"collected metric %s %s has label dimensions inconsistent with previously collected metrics in the same metric family",
metricFamily.GetName(), dtoMetric,
)
}
} else {
dimHashes[metricFamily.GetName()] = dh
}
metricHashes[h] = struct{}{} metricHashes[h] = struct{}{}
return nil return nil
} }
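The uniqueness check above hashes the metric name and the sorted label values and records the result in metricHashes. A rough, self-contained sketch of the same duplicate-detection idea, using the standard library's fnv hash instead of the package-internal hashNew/hashAdd helpers (the metric and label names below are made up for illustration):

package main

import (
	"fmt"
	"hash/fnv"
	"sort"
)

// hashMetric sketches the duplicate-detection hash: metric name plus
// sorted label values, joined by a separator byte.
func hashMetric(name string, labels map[string]string) uint64 {
	keys := make([]string, 0, len(labels))
	for k := range labels {
		keys = append(keys, k)
	}
	sort.Strings(keys) // label pairs must be sorted for a stable hash
	h := fnv.New64a()
	h.Write([]byte(name))
	h.Write([]byte{0})
	for _, k := range keys {
		h.Write([]byte(labels[k]))
		h.Write([]byte{0})
	}
	return h.Sum64()
}

func main() {
	seen := map[uint64]struct{}{}
	for _, method := range []string{"GET", "GET"} {
		h := hashMetric("http_requests_total", map[string]string{"method": method})
		if _, dup := seen[h]; dup {
			fmt.Println("duplicate metric detected")
		}
		seen[h] = struct{}{}
	}
}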
@ -791,7 +880,7 @@ func checkDescConsistency(
metricFamily.GetName(), dtoMetric, desc, metricFamily.GetName(), dtoMetric, desc,
) )
} }
sort.Sort(LabelPairSorter(lpsFromDesc)) sort.Sort(labelPairSorter(lpsFromDesc))
for i, lpFromDesc := range lpsFromDesc { for i, lpFromDesc := range lpsFromDesc {
lpFromMetric := dtoMetric.Label[i] lpFromMetric := dtoMetric.Label[i]
if lpFromDesc.GetName() != lpFromMetric.GetName() || if lpFromDesc.GetName() != lpFromMetric.GetName() ||

@ -21,9 +21,12 @@ package prometheus_test
import ( import (
"bytes" "bytes"
"math/rand"
"net/http" "net/http"
"net/http/httptest" "net/http/httptest"
"sync"
"testing" "testing"
"time"
dto "github.com/prometheus/client_model/go" dto "github.com/prometheus/client_model/go"
@ -34,7 +37,22 @@ import (
"github.com/prometheus/client_golang/prometheus/promhttp" "github.com/prometheus/client_golang/prometheus/promhttp"
) )
// uncheckedCollector wraps a Collector but its Describe method yields no Desc.
type uncheckedCollector struct {
c prometheus.Collector
}
func (u uncheckedCollector) Describe(_ chan<- *prometheus.Desc) {}
func (u uncheckedCollector) Collect(c chan<- prometheus.Metric) {
u.c.Collect(c)
}
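The wrapper above makes any Collector "unchecked" by sending no Desc from Describe, so the registry skips its Describe-based consistency checks. A hypothetical snippet showing how a test could register such a collector; it reuses the uncheckedCollector type and the prometheus import from this test file, and the test and metric names are illustrative:

func TestUncheckedRegistration(t *testing.T) {
	reg := prometheus.NewRegistry()
	cv := prometheus.NewCounterVec(
		prometheus.CounterOpts{Name: "jobs_total", Help: "Jobs processed."},
		[]string{"kind"},
	)
	// Wrapping cv hides its Desc, so the registry treats it as unchecked.
	if err := reg.Register(uncheckedCollector{cv}); err != nil {
		t.Fatal(err)
	}
	cv.WithLabelValues("batch").Inc()
}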
func testHandler(t testing.TB) { func testHandler(t testing.TB) {
// TODO(beorn7): This test is a bit too "end-to-end". It tests quite a
// few moving parts that are not strongly coupled. They could/should be
// tested separately. However, the changes planned for v0.10 will
// require a major rework of this test anyway, at which time I will
// structure it in a better way.
metricVec := prometheus.NewCounterVec( metricVec := prometheus.NewCounterVec(
prometheus.CounterOpts{ prometheus.CounterOpts{
@ -209,6 +227,117 @@ metric: <
expectedMetricFamilyMergedWithExternalAsProtoCompactText := []byte(`name:"name" help:"docstring" type:COUNTER metric:<label:<name:"constname" value:"constvalue" > label:<name:"labelname" value:"different_val" > counter:<value:42 > > metric:<label:<name:"constname" value:"constvalue" > label:<name:"labelname" value:"val1" > counter:<value:1 > > metric:<label:<name:"constname" value:"constvalue" > label:<name:"labelname" value:"val2" > counter:<value:1 > > expectedMetricFamilyMergedWithExternalAsProtoCompactText := []byte(`name:"name" help:"docstring" type:COUNTER metric:<label:<name:"constname" value:"constvalue" > label:<name:"labelname" value:"different_val" > counter:<value:42 > > metric:<label:<name:"constname" value:"constvalue" > label:<name:"labelname" value:"val1" > counter:<value:1 > > metric:<label:<name:"constname" value:"constvalue" > label:<name:"labelname" value:"val2" > counter:<value:1 > >
`) `)
externalMetricFamilyWithInvalidLabelValue := &dto.MetricFamily{
Name: proto.String("name"),
Help: proto.String("docstring"),
Type: dto.MetricType_COUNTER.Enum(),
Metric: []*dto.Metric{
{
Label: []*dto.LabelPair{
{
Name: proto.String("constname"),
Value: proto.String("\xFF"),
},
{
Name: proto.String("labelname"),
Value: proto.String("different_val"),
},
},
Counter: &dto.Counter{
Value: proto.Float64(42),
},
},
},
}
expectedMetricFamilyInvalidLabelValueAsText := []byte(`An error has occurred during metrics gathering:
collected metric "name" { label:<name:"constname" value:"\377" > label:<name:"labelname" value:"different_val" > counter:<value:42 > } has a label named "constname" whose value is not utf8: "\xff"
`)
summary := prometheus.NewSummary(prometheus.SummaryOpts{
Name: "complex",
Help: "A metric to check collisions with _sum and _count.",
})
summaryAsText := []byte(`# HELP complex A metric to check collisions with _sum and _count.
# TYPE complex summary
complex{quantile="0.5"} NaN
complex{quantile="0.9"} NaN
complex{quantile="0.99"} NaN
complex_sum 0
complex_count 0
`)
histogram := prometheus.NewHistogram(prometheus.HistogramOpts{
Name: "complex",
Help: "A metric to check collisions with _sun, _count, and _bucket.",
})
externalMetricFamilyWithBucketSuffix := &dto.MetricFamily{
Name: proto.String("complex_bucket"),
Help: proto.String("externaldocstring"),
Type: dto.MetricType_COUNTER.Enum(),
Metric: []*dto.Metric{
{
Counter: &dto.Counter{
Value: proto.Float64(1),
},
},
},
}
externalMetricFamilyWithBucketSuffixAsText := []byte(`# HELP complex_bucket externaldocstring
# TYPE complex_bucket counter
complex_bucket 1
`)
externalMetricFamilyWithCountSuffix := &dto.MetricFamily{
Name: proto.String("complex_count"),
Help: proto.String("externaldocstring"),
Type: dto.MetricType_COUNTER.Enum(),
Metric: []*dto.Metric{
{
Counter: &dto.Counter{
Value: proto.Float64(1),
},
},
},
}
bucketCollisionMsg := []byte(`An error has occurred during metrics gathering:
collected metric named "complex_bucket" collides with previously collected histogram named "complex"
`)
summaryCountCollisionMsg := []byte(`An error has occurred during metrics gathering:
collected metric named "complex_count" collides with previously collected summary named "complex"
`)
histogramCountCollisionMsg := []byte(`An error has occurred during metrics gathering:
collected metric named "complex_count" collides with previously collected histogram named "complex"
`)
externalMetricFamilyWithDuplicateLabel := &dto.MetricFamily{
Name: proto.String("broken_metric"),
Help: proto.String("The registry should detect the duplicate label."),
Type: dto.MetricType_COUNTER.Enum(),
Metric: []*dto.Metric{
{
Label: []*dto.LabelPair{
{
Name: proto.String("foo"),
Value: proto.String("bar"),
},
{
Name: proto.String("foo"),
Value: proto.String("baz"),
},
},
Counter: &dto.Counter{
Value: proto.Float64(2.7),
},
},
},
}
duplicateLabelMsg := []byte(`An error has occurred during metrics gathering:
collected metric "broken_metric" { label:<name:"foo" value:"bar" > label:<name:"foo" value:"baz" > counter:<value:2.7 > } has two or more labels with the same name: foo
`)
type output struct { type output struct {
headers map[string]string headers map[string]string
body []byte body []byte
@ -226,7 +355,7 @@ metric: <
}, },
out: output{ out: output{
headers: map[string]string{ headers: map[string]string{
"Content-Type": `text/plain; version=0.0.4`, "Content-Type": `text/plain; version=0.0.4; charset=utf-8`,
}, },
body: []byte{}, body: []byte{},
}, },
@ -237,7 +366,7 @@ metric: <
}, },
out: output{ out: output{
headers: map[string]string{ headers: map[string]string{
"Content-Type": `text/plain; version=0.0.4`, "Content-Type": `text/plain; version=0.0.4; charset=utf-8`,
}, },
body: []byte{}, body: []byte{},
}, },
@ -248,7 +377,7 @@ metric: <
}, },
out: output{ out: output{
headers: map[string]string{ headers: map[string]string{
"Content-Type": `text/plain; version=0.0.4`, "Content-Type": `text/plain; version=0.0.4; charset=utf-8`,
}, },
body: []byte{}, body: []byte{},
}, },
@ -270,7 +399,7 @@ metric: <
}, },
out: output{ out: output{
headers: map[string]string{ headers: map[string]string{
"Content-Type": `text/plain; version=0.0.4`, "Content-Type": `text/plain; version=0.0.4; charset=utf-8`,
}, },
body: expectedMetricFamilyAsText, body: expectedMetricFamilyAsText,
}, },
@ -294,7 +423,7 @@ metric: <
}, },
out: output{ out: output{
headers: map[string]string{ headers: map[string]string{
"Content-Type": `text/plain; version=0.0.4`, "Content-Type": `text/plain; version=0.0.4; charset=utf-8`,
}, },
body: externalMetricFamilyAsText, body: externalMetricFamilyAsText,
}, },
@ -337,7 +466,7 @@ metric: <
}, },
out: output{ out: output{
headers: map[string]string{ headers: map[string]string{
"Content-Type": `text/plain; version=0.0.4`, "Content-Type": `text/plain; version=0.0.4; charset=utf-8`,
}, },
body: []byte{}, body: []byte{},
}, },
@ -348,7 +477,7 @@ metric: <
}, },
out: output{ out: output{
headers: map[string]string{ headers: map[string]string{
"Content-Type": `text/plain; version=0.0.4`, "Content-Type": `text/plain; version=0.0.4; charset=utf-8`,
}, },
body: expectedMetricFamilyAsText, body: expectedMetricFamilyAsText,
}, },
@ -360,7 +489,7 @@ metric: <
}, },
out: output{ out: output{
headers: map[string]string{ headers: map[string]string{
"Content-Type": `text/plain; version=0.0.4`, "Content-Type": `text/plain; version=0.0.4; charset=utf-8`,
}, },
body: bytes.Join( body: bytes.Join(
[][]byte{ [][]byte{
@ -452,6 +581,114 @@ metric: <
externalMetricFamilyWithSameName, externalMetricFamilyWithSameName,
}, },
}, },
{ // 16
headers: map[string]string{
"Accept": "application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=compact-text",
},
out: output{
headers: map[string]string{
"Content-Type": `text/plain; charset=utf-8`,
},
body: expectedMetricFamilyInvalidLabelValueAsText,
},
collector: metricVec,
externalMF: []*dto.MetricFamily{
externalMetricFamily,
externalMetricFamilyWithInvalidLabelValue,
},
},
{ // 17
headers: map[string]string{
"Accept": "text/plain",
},
out: output{
headers: map[string]string{
"Content-Type": `text/plain; version=0.0.4; charset=utf-8`,
},
body: expectedMetricFamilyAsText,
},
collector: uncheckedCollector{metricVec},
},
{ // 18
headers: map[string]string{
"Accept": "text/plain",
},
out: output{
headers: map[string]string{
"Content-Type": `text/plain; charset=utf-8`,
},
body: histogramCountCollisionMsg,
},
collector: histogram,
externalMF: []*dto.MetricFamily{
externalMetricFamilyWithCountSuffix,
},
},
{ // 19
headers: map[string]string{
"Accept": "text/plain",
},
out: output{
headers: map[string]string{
"Content-Type": `text/plain; charset=utf-8`,
},
body: bucketCollisionMsg,
},
collector: histogram,
externalMF: []*dto.MetricFamily{
externalMetricFamilyWithBucketSuffix,
},
},
{ // 20
headers: map[string]string{
"Accept": "text/plain",
},
out: output{
headers: map[string]string{
"Content-Type": `text/plain; charset=utf-8`,
},
body: summaryCountCollisionMsg,
},
collector: summary,
externalMF: []*dto.MetricFamily{
externalMetricFamilyWithCountSuffix,
},
},
{ // 21
headers: map[string]string{
"Accept": "text/plain",
},
out: output{
headers: map[string]string{
"Content-Type": `text/plain; version=0.0.4; charset=utf-8`,
},
body: bytes.Join(
[][]byte{
summaryAsText,
externalMetricFamilyWithBucketSuffixAsText,
},
[]byte{},
),
},
collector: summary,
externalMF: []*dto.MetricFamily{
externalMetricFamilyWithBucketSuffix,
},
},
{ // 22
headers: map[string]string{
"Accept": "text/plain",
},
out: output{
headers: map[string]string{
"Content-Type": `text/plain; charset=utf-8`,
},
body: duplicateLabelMsg,
},
externalMF: []*dto.MetricFamily{
externalMetricFamilyWithDuplicateLabel,
},
},
} }
for i, scenario := range scenarios { for i, scenario := range scenarios {
registry := prometheus.NewPedanticRegistry() registry := prometheus.NewPedanticRegistry()
@ -466,7 +703,7 @@ metric: <
} }
if scenario.collector != nil { if scenario.collector != nil {
registry.Register(scenario.collector) registry.MustRegister(scenario.collector)
} }
writer := httptest.NewRecorder() writer := httptest.NewRecorder()
handler := prometheus.InstrumentHandler("prometheus", promhttp.HandlerFor(gatherer, promhttp.HandlerOpts{})) handler := prometheus.InstrumentHandler("prometheus", promhttp.HandlerFor(gatherer, promhttp.HandlerOpts{}))
@ -477,7 +714,7 @@ metric: <
handler(writer, request) handler(writer, request)
for key, value := range scenario.out.headers { for key, value := range scenario.out.headers {
if writer.HeaderMap.Get(key) != value { if writer.Header().Get(key) != value {
t.Errorf( t.Errorf(
"%d. expected %q for header %q, got %q", "%d. expected %q for header %q, got %q",
i, value, key, writer.Header().Get(key), i, value, key, writer.Header().Get(key),
@ -504,14 +741,8 @@ func BenchmarkHandler(b *testing.B) {
} }
} }
func TestRegisterWithOrGet(t *testing.T) { func TestAlreadyRegistered(t *testing.T) {
// Replace the default registerer just to be sure. This is bad, but this reg := prometheus.NewRegistry()
// whole test will go away once RegisterOrGet is removed.
oldRegisterer := prometheus.DefaultRegisterer
defer func() {
prometheus.DefaultRegisterer = oldRegisterer
}()
prometheus.DefaultRegisterer = prometheus.NewRegistry()
original := prometheus.NewCounterVec( original := prometheus.NewCounterVec(
prometheus.CounterOpts{ prometheus.CounterOpts{
Name: "test", Name: "test",
@ -526,20 +757,117 @@ func TestRegisterWithOrGet(t *testing.T) {
}, },
[]string{"foo", "bar"}, []string{"foo", "bar"},
) )
if err := prometheus.Register(original); err != nil { var err error
if err = reg.Register(original); err != nil {
t.Fatal(err) t.Fatal(err)
} }
if err := prometheus.Register(equalButNotSame); err == nil { if err = reg.Register(equalButNotSame); err == nil {
t.Fatal("expected error when registringe equal collector") t.Fatal("expected error when registering equal collector")
} }
existing, err := prometheus.RegisterOrGet(equalButNotSame) if are, ok := err.(prometheus.AlreadyRegisteredError); ok {
if err != nil { if are.ExistingCollector != original {
t.Fatal(err) t.Error("expected original collector but got something else")
}
if are.ExistingCollector == equalButNotSame {
t.Error("expected original callector but got new one")
}
} else {
t.Error("unexpected error:", err)
}
}
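The test above exercises AlreadyRegisteredError. In application code, the usual pattern is to fall back to the collector that is already registered; a hedged sketch, assuming the prometheus import and a hypothetical helper name:

// registerOrReuse is a hypothetical helper: register c, or reuse the
// collector that is already registered under the same descriptor.
func registerOrReuse(reg prometheus.Registerer, c *prometheus.CounterVec) *prometheus.CounterVec {
	if err := reg.Register(c); err != nil {
		if are, ok := err.(prometheus.AlreadyRegisteredError); ok {
			return are.ExistingCollector.(*prometheus.CounterVec)
		}
		panic(err) // some other registration problem
	}
	return c
}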
// TestHistogramVecRegisterGatherConcurrency is an end-to-end test that
// concurrently calls Observe on random elements of a HistogramVec while the
// same HistogramVec is registered concurrently and the Gather method of the
// registry is called concurrently.
func TestHistogramVecRegisterGatherConcurrency(t *testing.T) {
var (
reg = prometheus.NewPedanticRegistry()
hv = prometheus.NewHistogramVec(
prometheus.HistogramOpts{
Name: "test_histogram",
Help: "This helps testing.",
ConstLabels: prometheus.Labels{"foo": "bar"},
},
[]string{"one", "two", "three"},
)
labelValues = []string{"a", "b", "c", "alpha", "beta", "gamma", "aleph", "beth", "gimel"}
quit = make(chan struct{})
wg sync.WaitGroup
)
observe := func() {
defer wg.Done()
for {
select {
case <-quit:
return
default:
obs := rand.NormFloat64()*.1 + .2
hv.WithLabelValues(
labelValues[rand.Intn(len(labelValues))],
labelValues[rand.Intn(len(labelValues))],
labelValues[rand.Intn(len(labelValues))],
).Observe(obs)
}
}
} }
if existing != original {
t.Error("expected original collector but got something else") register := func() {
defer wg.Done()
for {
select {
case <-quit:
return
default:
if err := reg.Register(hv); err != nil {
if _, ok := err.(prometheus.AlreadyRegisteredError); !ok {
t.Error("Registering failed:", err)
}
}
time.Sleep(7 * time.Millisecond)
}
}
} }
if existing == equalButNotSame {
t.Error("expected original callector but got new one") gather := func() {
defer wg.Done()
for {
select {
case <-quit:
return
default:
if g, err := reg.Gather(); err != nil {
t.Error("Gathering failed:", err)
} else {
if len(g) == 0 {
continue
}
if len(g) != 1 {
t.Error("Gathered unexpected number of metric families:", len(g))
}
if len(g[0].Metric[0].Label) != 4 {
t.Error("Gathered unexpected number of label pairs:", len(g[0].Metric[0].Label))
}
}
time.Sleep(4 * time.Millisecond)
}
}
} }
wg.Add(10)
go observe()
go observe()
go register()
go observe()
go gather()
go observe()
go register()
go observe()
go gather()
go observe()
time.Sleep(time.Second)
close(quit)
wg.Wait()
} }

@ -36,7 +36,10 @@ const quantileLabel = "quantile"
// //
// A typical use-case is the observation of request latencies. By default, a // A typical use-case is the observation of request latencies. By default, a
// Summary provides the median, the 90th and the 99th percentile of the latency // Summary provides the median, the 90th and the 99th percentile of the latency
// as rank estimations. // as rank estimations. However, the default behavior will change in the
// upcoming v0.10 of the library. There will be no rank estimations at all by
// default. For a sane transition, it is recommended to set the desired rank
// estimations explicitly.
// //
// Note that the rank estimations cannot be aggregated in a meaningful way with // Note that the rank estimations cannot be aggregated in a meaningful way with
// the Prometheus query language (i.e. you cannot average or add them). If you // the Prometheus query language (i.e. you cannot average or add them). If you
@ -54,6 +57,9 @@ type Summary interface {
} }
// DefObjectives are the default Summary quantile values. // DefObjectives are the default Summary quantile values.
//
// Deprecated: DefObjectives will not be used as the default objectives in
// v0.10 of the library. The default Summary will have no quantiles then.
var ( var (
DefObjectives = map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001} DefObjectives = map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001}
@ -75,8 +81,10 @@ const (
) )
// SummaryOpts bundles the options for creating a Summary metric. It is // SummaryOpts bundles the options for creating a Summary metric. It is
// mandatory to set Name and Help to a non-empty string. All other fields are // mandatory to set Name to a non-empty string. While all other fields are
// optional and can safely be left at their zero value. // optional and can safely be left at their zero value, it is recommended to set
// a help string and to explicitly set the Objectives field to the desired value
// as the default value will change in the upcoming v0.10 of the library.
type SummaryOpts struct { type SummaryOpts struct {
// Namespace, Subsystem, and Name are components of the fully-qualified // Namespace, Subsystem, and Name are components of the fully-qualified
// name of the Summary (created by joining these components with // name of the Summary (created by joining these components with
@ -87,35 +95,39 @@ type SummaryOpts struct {
Subsystem string Subsystem string
Name string Name string
// Help provides information about this Summary. Mandatory! // Help provides information about this Summary.
// //
// Metrics with the same fully-qualified name must have the same Help // Metrics with the same fully-qualified name must have the same Help
// string. // string.
Help string Help string
// ConstLabels are used to attach fixed labels to this // ConstLabels are used to attach fixed labels to this metric. Metrics
// Summary. Summaries with the same fully-qualified name must have the // with the same fully-qualified name must have the same label names in
// same label names in their ConstLabels. // their ConstLabels.
// //
// Note that in most cases, labels have a value that varies during the // Due to the way a Summary is represented in the Prometheus text format
// lifetime of a process. Those labels are usually managed with a // and how it is handled by the Prometheus server internally, “quantile”
// SummaryVec. ConstLabels serve only special purposes. One is for the // is an illegal label name. Construction of a Summary or SummaryVec
// special case where the value of a label does not change during the // will panic if this label name is used in ConstLabels.
// lifetime of a process, e.g. if the revision of the running binary is
// put into a label. Another, more advanced purpose is if more than one
// Collector needs to collect Summaries with the same fully-qualified
// name. In that case, those Summaries must differ in the values of
// their ConstLabels. See the Collector examples.
// //
// If the value of a label never changes (not even between binaries), // ConstLabels are only used rarely. In particular, do not use them to
// that label most likely should not be a label at all (but part of the // attach the same labels to all your metrics. Those use cases are
// metric name). // better covered by target labels set by the scraping Prometheus
// server, or by one specific metric (e.g. a build_info or a
// machine_role metric). See also
// https://prometheus.io/docs/instrumenting/writing_exporters/#target-labels,-not-static-scraped-labels
ConstLabels Labels ConstLabels Labels
// Objectives defines the quantile rank estimates with their respective // Objectives defines the quantile rank estimates with their respective
// absolute error. If Objectives[q] = e, then the value reported // absolute error. If Objectives[q] = e, then the value reported for q
// for q will be the φ-quantile value for some φ between q-e and q+e. // will be the φ-quantile value for some φ between q-e and q+e. The
// The default value is DefObjectives. // default value is DefObjectives. It is used if Objectives is left at
// its zero value (i.e. nil). To create a Summary without Objectives,
// set it to an empty map (i.e. map[float64]float64{}).
//
// Deprecated: Note that the current value of DefObjectives is
// deprecated. It will be replaced by an empty map in v0.10 of the
// library. Please explicitly set Objectives to the desired value.
Objectives map[float64]float64 Objectives map[float64]float64
// MaxAge defines the duration for which an observation stays relevant // MaxAge defines the duration for which an observation stays relevant
@ -183,7 +195,7 @@ func newSummary(desc *Desc, opts SummaryOpts, labelValues ...string) Summary {
} }
} }
if len(opts.Objectives) == 0 { if opts.Objectives == nil {
opts.Objectives = DefObjectives opts.Objectives = DefObjectives
} }
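Per the updated SummaryOpts documentation, only a nil Objectives map falls back to the deprecated DefObjectives, while an empty map disables quantiles entirely. A hedged user-side sketch (metric names are illustrative) that sets Objectives explicitly in both forms:

// Explicit objectives, so behavior does not depend on the deprecated
// DefObjectives default.
latency := prometheus.NewSummary(prometheus.SummaryOpts{
	Name:       "request_duration_seconds",
	Help:       "Request latency in seconds.",
	Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001},
})

// An empty (non-nil) map yields a Summary with only _sum and _count,
// no quantiles.
plain := prometheus.NewSummary(prometheus.SummaryOpts{
	Name:       "request_duration_plain_seconds",
	Help:       "Request latency in seconds, without quantiles.",
	Objectives: map[float64]float64{},
})

prometheus.MustRegister(latency, plain)
latency.Observe(0.27)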
@ -390,13 +402,21 @@ func (s quantSort) Less(i, j int) bool {
// (e.g. HTTP request latencies, partitioned by status code and method). Create // (e.g. HTTP request latencies, partitioned by status code and method). Create
// instances with NewSummaryVec. // instances with NewSummaryVec.
type SummaryVec struct { type SummaryVec struct {
*MetricVec *metricVec
} }
// NewSummaryVec creates a new SummaryVec based on the provided SummaryOpts and // NewSummaryVec creates a new SummaryVec based on the provided SummaryOpts and
// partitioned by the given label names. At least one label name must be // partitioned by the given label names.
// provided. //
// Due to the way a Summary is represented in the Prometheus text format and how
// it is handled by the Prometheus server internally, “quantile” is an illegal
// label name. NewSummaryVec will panic if this label name is used.
func NewSummaryVec(opts SummaryOpts, labelNames []string) *SummaryVec { func NewSummaryVec(opts SummaryOpts, labelNames []string) *SummaryVec {
for _, ln := range labelNames {
if ln == quantileLabel {
panic(errQuantileLabelNotAllowed)
}
}
desc := NewDesc( desc := NewDesc(
BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
opts.Help, opts.Help,
@ -404,47 +424,116 @@ func NewSummaryVec(opts SummaryOpts, labelNames []string) *SummaryVec {
opts.ConstLabels, opts.ConstLabels,
) )
return &SummaryVec{ return &SummaryVec{
MetricVec: newMetricVec(desc, func(lvs ...string) Metric { metricVec: newMetricVec(desc, func(lvs ...string) Metric {
return newSummary(desc, opts, lvs...) return newSummary(desc, opts, lvs...)
}), }),
} }
} }
// GetMetricWithLabelValues replaces the method of the same name in // GetMetricWithLabelValues returns the Summary for the given slice of label
// MetricVec. The difference is that this method returns a Summary and not a // values (same order as the VariableLabels in Desc). If that combination of
// Metric so that no type conversion is required. // label values is accessed for the first time, a new Summary is created.
func (m *SummaryVec) GetMetricWithLabelValues(lvs ...string) (Summary, error) { //
metric, err := m.MetricVec.GetMetricWithLabelValues(lvs...) // It is possible to call this method without using the returned Summary to only
// create the new Summary but leave it at its starting value, a Summary without
// any observations.
//
// Keeping the Summary for later use is possible (and should be considered if
// performance is critical), but keep in mind that Reset, DeleteLabelValues and
// Delete can be used to delete the Summary from the SummaryVec. In that case,
// the Summary will still exist, but it will not be exported anymore, even if a
// Summary with the same label values is created later. See also the CounterVec
// example.
//
// An error is returned if the number of label values is not the same as the
// number of VariableLabels in Desc (minus any curried labels).
//
// Note that for more than one label value, this method is prone to mistakes
// caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as
// an alternative to avoid that type of mistake. For higher label numbers, the
// latter has a much more readable (albeit more verbose) syntax, but it comes
// with a performance overhead (for creating and processing the Labels map).
// See also the GaugeVec example.
func (v *SummaryVec) GetMetricWithLabelValues(lvs ...string) (Observer, error) {
metric, err := v.metricVec.getMetricWithLabelValues(lvs...)
if metric != nil { if metric != nil {
return metric.(Summary), err return metric.(Observer), err
} }
return nil, err return nil, err
} }
// GetMetricWith replaces the method of the same name in MetricVec. The // GetMetricWith returns the Summary for the given Labels map (the label names
// difference is that this method returns a Summary and not a Metric so that no // must match those of the VariableLabels in Desc). If that label map is
// type conversion is required. // accessed for the first time, a new Summary is created. Implications of
func (m *SummaryVec) GetMetricWith(labels Labels) (Summary, error) { // creating a Summary without using it and keeping the Summary for later use are
metric, err := m.MetricVec.GetMetricWith(labels) // the same as for GetMetricWithLabelValues.
//
// An error is returned if the number and names of the Labels are inconsistent
// with those of the VariableLabels in Desc (minus any curried labels).
//
// This method is used for the same purpose as
// GetMetricWithLabelValues(...string). See there for pros and cons of the two
// methods.
func (v *SummaryVec) GetMetricWith(labels Labels) (Observer, error) {
metric, err := v.metricVec.getMetricWith(labels)
if metric != nil { if metric != nil {
return metric.(Summary), err return metric.(Observer), err
} }
return nil, err return nil, err
} }
// WithLabelValues works as GetMetricWithLabelValues, but panics where // WithLabelValues works as GetMetricWithLabelValues, but panics where
// GetMetricWithLabelValues would have returned an error. By not returning an // GetMetricWithLabelValues would have returned an error. Not returning an
// error, WithLabelValues allows shortcuts like // error allows shortcuts like
// myVec.WithLabelValues("404", "GET").Observe(42.21) // myVec.WithLabelValues("404", "GET").Observe(42.21)
func (m *SummaryVec) WithLabelValues(lvs ...string) Summary { func (v *SummaryVec) WithLabelValues(lvs ...string) Observer {
return m.MetricVec.WithLabelValues(lvs...).(Summary) s, err := v.GetMetricWithLabelValues(lvs...)
if err != nil {
panic(err)
}
return s
} }
// With works as GetMetricWith, but panics where GetMetricWithLabels would have // With works as GetMetricWith, but panics where GetMetricWithLabels would have
// returned an error. By not returning an error, With allows shortcuts like // returned an error. Not returning an error allows shortcuts like
// myVec.With(Labels{"code": "404", "method": "GET"}).Observe(42.21) // myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Observe(42.21)
func (m *SummaryVec) With(labels Labels) Summary { func (v *SummaryVec) With(labels Labels) Observer {
return m.MetricVec.With(labels).(Summary) s, err := v.GetMetricWith(labels)
if err != nil {
panic(err)
}
return s
}
// CurryWith returns a vector curried with the provided labels, i.e. the
// returned vector has those labels pre-set for all labeled operations performed
// on it. The cardinality of the curried vector is reduced accordingly. The
// order of the remaining labels stays the same (just with the curried labels
// taken out of the sequence which is relevant for the
// (GetMetric)WithLabelValues methods). It is possible to curry a curried
// vector, but only with labels not yet used for currying before.
//
// The metrics contained in the SummaryVec are shared between the curried and
// uncurried vectors. They are just accessed differently. Curried and uncurried
// vectors behave identically in terms of collection. Only one must be
// registered with a given registry (usually the uncurried version). The Reset
// method deletes all metrics, even if called on a curried vector.
func (v *SummaryVec) CurryWith(labels Labels) (ObserverVec, error) {
vec, err := v.curryWith(labels)
if vec != nil {
return &SummaryVec{vec}, err
}
return nil, err
}
// MustCurryWith works as CurryWith but panics where CurryWith would have
// returned an error.
func (v *SummaryVec) MustCurryWith(labels Labels) ObserverVec {
vec, err := v.CurryWith(labels)
if err != nil {
panic(err)
}
return vec
} }
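The CurryWith/MustCurryWith documentation above describes pre-setting labels on a vector. A hedged user-side sketch of currying a SummaryVec (the service and handler names are illustrative):

durations := prometheus.NewSummaryVec(
	prometheus.SummaryOpts{
		Name:       "handler_duration_seconds",
		Help:       "Handler latency in seconds.",
		Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01},
	},
	[]string{"service", "handler"},
)

// Pre-set the "service" label; the curried vector only needs "handler".
apiDurations := durations.MustCurryWith(prometheus.Labels{"service": "api"})
apiDurations.WithLabelValues("login").Observe(0.042)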
type constSummary struct { type constSummary struct {
@ -497,7 +586,7 @@ func (s *constSummary) Write(out *dto.Metric) error {
// map[float64]float64{0.5: 0.23, 0.99: 0.56} // map[float64]float64{0.5: 0.23, 0.99: 0.56}
// //
// NewConstSummary returns an error if the length of labelValues is not // NewConstSummary returns an error if the length of labelValues is not
// consistent with the variable labels in Desc. // consistent with the variable labels in Desc or if Desc is invalid.
func NewConstSummary( func NewConstSummary(
desc *Desc, desc *Desc,
count uint64, count uint64,
@ -505,8 +594,11 @@ func NewConstSummary(
quantiles map[float64]float64, quantiles map[float64]float64,
labelValues ...string, labelValues ...string,
) (Metric, error) { ) (Metric, error) {
if len(desc.variableLabels) != len(labelValues) { if desc.err != nil {
return nil, errInconsistentCardinality return nil, desc.err
}
if err := validateLabelValues(labelValues, len(desc.variableLabels)); err != nil {
return nil, err
} }
return &constSummary{ return &constSummary{
desc: desc, desc: desc,

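NewConstSummary, as documented above, builds a read-only Summary from already aggregated data, typically inside a custom Collector's Collect method. A hedged sketch with an illustrative descriptor and made-up numbers:

desc := prometheus.NewDesc(
	"external_request_duration_seconds",
	"Latency summary scraped from an external system.",
	nil, nil,
)
s, err := prometheus.NewConstSummary(
	desc,
	4711,  // observation count
	403.4, // sum of all observed values
	map[float64]float64{0.5: 0.23, 0.99: 0.56},
)
if err == nil {
	_ = s // inside a Collector's Collect method, this would be: ch <- s
}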
@ -25,6 +25,70 @@ import (
dto "github.com/prometheus/client_model/go" dto "github.com/prometheus/client_model/go"
) )
func TestSummaryWithDefaultObjectives(t *testing.T) {
reg := NewRegistry()
summaryWithDefaultObjectives := NewSummary(SummaryOpts{
Name: "default_objectives",
Help: "Test help.",
})
if err := reg.Register(summaryWithDefaultObjectives); err != nil {
t.Error(err)
}
m := &dto.Metric{}
if err := summaryWithDefaultObjectives.Write(m); err != nil {
t.Error(err)
}
if len(m.GetSummary().Quantile) != len(DefObjectives) {
t.Error("expected default objectives in summary")
}
}
func TestSummaryWithoutObjectives(t *testing.T) {
reg := NewRegistry()
summaryWithEmptyObjectives := NewSummary(SummaryOpts{
Name: "empty_objectives",
Help: "Test help.",
Objectives: map[float64]float64{},
})
if err := reg.Register(summaryWithEmptyObjectives); err != nil {
t.Error(err)
}
m := &dto.Metric{}
if err := summaryWithEmptyObjectives.Write(m); err != nil {
t.Error(err)
}
if len(m.GetSummary().Quantile) != 0 {
t.Error("expected no objectives in summary")
}
}
func TestSummaryWithQuantileLabel(t *testing.T) {
defer func() {
if r := recover(); r == nil {
t.Error("Attempt to create Summary with 'quantile' label did not panic.")
}
}()
_ = NewSummary(SummaryOpts{
Name: "test_summary",
Help: "less",
ConstLabels: Labels{"quantile": "test"},
})
}
func TestSummaryVecWithQuantileLabel(t *testing.T) {
defer func() {
if r := recover(); r == nil {
t.Error("Attempt to create SummaryVec with 'quantile' label did not panic.")
}
}()
_ = NewSummaryVec(SummaryOpts{
Name: "test_summary",
Help: "less",
}, []string{"quantile"})
}
func benchmarkSummaryObserve(w int, b *testing.B) { func benchmarkSummaryObserve(w int, b *testing.B) {
b.StopTimer() b.StopTimer()
@ -136,8 +200,9 @@ func TestSummaryConcurrency(t *testing.T) {
end.Add(concLevel) end.Add(concLevel)
sum := NewSummary(SummaryOpts{ sum := NewSummary(SummaryOpts{
Name: "test_summary", Name: "test_summary",
Help: "helpless", Help: "helpless",
Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001},
}) })
allVars := make([]float64, total) allVars := make([]float64, total)
@ -223,8 +288,9 @@ func TestSummaryVecConcurrency(t *testing.T) {
sum := NewSummaryVec( sum := NewSummaryVec(
SummaryOpts{ SummaryOpts{
Name: "test_summary", Name: "test_summary",
Help: "helpless", Help: "helpless",
Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001},
}, },
[]string{"label"}, []string{"label"},
) )
@ -260,7 +326,7 @@ func TestSummaryVecConcurrency(t *testing.T) {
for i := 0; i < vecLength; i++ { for i := 0; i < vecLength; i++ {
m := &dto.Metric{} m := &dto.Metric{}
s := sum.WithLabelValues(string('A' + i)) s := sum.WithLabelValues(string('A' + i))
s.Write(m) s.(Summary).Write(m)
if got, want := int(*m.Summary.SampleCount), len(allVars[i]); got != want { if got, want := int(*m.Summary.SampleCount), len(allVars[i]); got != want {
t.Errorf("got sample count %d for label %c, want %d", got, 'A'+i, want) t.Errorf("got sample count %d for label %c, want %d", got, 'A'+i, want)
} }
@ -305,7 +371,7 @@ func TestSummaryDecay(t *testing.T) {
m := &dto.Metric{} m := &dto.Metric{}
i := 0 i := 0
tick := time.NewTicker(time.Millisecond) tick := time.NewTicker(time.Millisecond)
for _ = range tick.C { for range tick.C {
i++ i++
sum.Observe(float64(i)) sum.Observe(float64(i))
if i%10 == 0 { if i%10 == 0 {

@ -0,0 +1,184 @@
// Copyright 2018 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package testutil provides helpers to test code using the prometheus package
// of client_golang.
//
// While writing unit tests to verify correct instrumentation of your code, it's
// a common mistake to mostly test the instrumentation library instead of your
// own code. Rather than verifying that a prometheus.Counter's value has changed
// as expected or that it shows up in the exposition after registration, it is
// in general more robust and more faithful to the concept of unit tests to use
// mock implementations of the prometheus.Counter and prometheus.Registerer
// interfaces that simply assert that the Add or Register methods have been
// called with the expected arguments. However, this might be overkill in simple
// scenarios. The ToFloat64 function is provided for simple inspection of a
// single-value metric, but it has to be used with caution.
//
// End-to-end tests to verify all or larger parts of the metrics exposition can
// be implemented with the CollectAndCompare or GatherAndCompare functions. The
// most appropriate use is not so much testing instrumentation of your code, but
// testing custom prometheus.Collector implementations and in particular whole
// exporters, i.e. programs that retrieve telemetry data from a 3rd party source
// and convert it into Prometheus metrics.
package testutil
import (
"bytes"
"fmt"
"io"
"reflect"
"github.com/prometheus/common/expfmt"
dto "github.com/prometheus/client_model/go"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/internal"
)
// ToFloat64 collects all Metrics from the provided Collector. It expects that
// this results in exactly one Metric being collected, which must be a Gauge,
// Counter, or Untyped. In all other cases, ToFloat64 panics. ToFloat64 returns
// the value of the collected Metric.
//
// The Collector provided is typically a simple instance of Gauge or Counter, or
// less commonly a GaugeVec or CounterVec with exactly one element. But any
// Collector fulfilling the prerequisites described above will do.
//
// Use this function with caution. It is computationally very expensive and thus
// not suited at all to read values from Metrics in regular code. This is really
// only for testing purposes, and even for testing, other approaches are often
// more appropriate (see this package's documentation).
//
// A clear anti-pattern would be to use a metric type from the prometheus
// package to track values that are also needed for something else than the
// exposition of Prometheus metrics. For example, you would like to track the
// number of items in a queue because your code should reject queuing further
// items if a certain limit is reached. It is tempting to track the number of
// items in a prometheus.Gauge, as it is then easily available as a metric for
// exposition, too. However, then you would need to call ToFloat64 in your
// regular code, potentially quite often. The recommended way is to track the
// number of items conventionally (in the way you would have done it without
// considering Prometheus metrics) and then expose the number with a
// prometheus.GaugeFunc.
func ToFloat64(c prometheus.Collector) float64 {
var (
m prometheus.Metric
mCount int
mChan = make(chan prometheus.Metric)
done = make(chan struct{})
)
go func() {
for m = range mChan {
mCount++
}
close(done)
}()
c.Collect(mChan)
close(mChan)
<-done
if mCount != 1 {
panic(fmt.Errorf("collected %d metrics instead of exactly 1", mCount))
}
pb := &dto.Metric{}
m.Write(pb)
if pb.Gauge != nil {
return pb.Gauge.GetValue()
}
if pb.Counter != nil {
return pb.Counter.GetValue()
}
if pb.Untyped != nil {
return pb.Untyped.GetValue()
}
panic(fmt.Errorf("collected a non-gauge/counter/untyped metric: %s", pb))
}
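A hedged example of ToFloat64 in a unit test, in line with the caveats in the doc comment above; the test name, metric name, and imports (testing, prometheus, and this testutil package) are assumptions for illustration:

func TestJobCounter(t *testing.T) {
	jobs := prometheus.NewCounter(prometheus.CounterOpts{
		Name: "jobs_processed_total",
		Help: "Number of processed jobs.",
	})
	jobs.Add(3)
	if got := testutil.ToFloat64(jobs); got != 3 {
		t.Errorf("want 3 processed jobs, got %v", got)
	}
}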
// CollectAndCompare registers the provided Collector with a newly created
// pedantic Registry. It then does the same as GatherAndCompare, gathering the
// metrics from the pedantic Registry.
func CollectAndCompare(c prometheus.Collector, expected io.Reader, metricNames ...string) error {
reg := prometheus.NewPedanticRegistry()
if err := reg.Register(c); err != nil {
return fmt.Errorf("registering collector failed: %s", err)
}
return GatherAndCompare(reg, expected, metricNames...)
}
// GatherAndCompare gathers all metrics from the provided Gatherer and compares
// it to an expected output read from the provided Reader in the Prometheus text
// exposition format. If any metricNames are provided, only metrics with those
// names are compared.
func GatherAndCompare(g prometheus.Gatherer, expected io.Reader, metricNames ...string) error {
metrics, err := g.Gather()
if err != nil {
return fmt.Errorf("gathering metrics failed: %s", err)
}
if metricNames != nil {
metrics = filterMetrics(metrics, metricNames)
}
var tp expfmt.TextParser
expectedMetrics, err := tp.TextToMetricFamilies(expected)
if err != nil {
return fmt.Errorf("parsing expected metrics failed: %s", err)
}
if !reflect.DeepEqual(metrics, internal.NormalizeMetricFamilies(expectedMetrics)) {
// Encode the gathered output to the readable text format for comparison.
var buf1 bytes.Buffer
enc := expfmt.NewEncoder(&buf1, expfmt.FmtText)
for _, mf := range metrics {
if err := enc.Encode(mf); err != nil {
return fmt.Errorf("encoding result failed: %s", err)
}
}
// Encode normalized expected metrics again to generate them in the same ordering
// the registry does to spot differences more easily.
var buf2 bytes.Buffer
enc = expfmt.NewEncoder(&buf2, expfmt.FmtText)
for _, mf := range internal.NormalizeMetricFamilies(expectedMetrics) {
if err := enc.Encode(mf); err != nil {
return fmt.Errorf("encoding result failed: %s", err)
}
}
return fmt.Errorf(`
metric output does not match expectation; want:
%s
got:
%s
`, buf2.String(), buf1.String())
}
return nil
}
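A hedged example of GatherAndCompare against a dedicated registry, filtering on one metric name; the test and metric names are illustrative, and the snippet assumes the usual testing, strings, prometheus, and testutil imports:

func TestQueueMetrics(t *testing.T) {
	reg := prometheus.NewPedanticRegistry()
	queued := prometheus.NewGauge(prometheus.GaugeOpts{
		Name: "queue_length",
		Help: "Number of queued items.",
	})
	reg.MustRegister(queued)
	queued.Set(7)

	expected := `
# HELP queue_length Number of queued items.
# TYPE queue_length gauge
queue_length 7
`
	if err := testutil.GatherAndCompare(reg, strings.NewReader(expected), "queue_length"); err != nil {
		t.Errorf("unexpected gathering result:\n%s", err)
	}
}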
func filterMetrics(metrics []*dto.MetricFamily, names []string) []*dto.MetricFamily {
var filtered []*dto.MetricFamily
for _, m := range metrics {
for _, name := range names {
if m.GetName() == name {
filtered = append(filtered, m)
break
}
}
}
return filtered
}

@ -0,0 +1,213 @@
// Copyright 2018 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package testutil
import (
"strings"
"testing"
"github.com/prometheus/client_golang/prometheus"
)
type untypedCollector struct{}
func (u untypedCollector) Describe(c chan<- *prometheus.Desc) {
c <- prometheus.NewDesc("name", "help", nil, nil)
}
func (u untypedCollector) Collect(c chan<- prometheus.Metric) {
c <- prometheus.MustNewConstMetric(
prometheus.NewDesc("name", "help", nil, nil),
prometheus.UntypedValue,
2001,
)
}
func TestToFloat64(t *testing.T) {
gaugeWithAValueSet := prometheus.NewGauge(prometheus.GaugeOpts{})
gaugeWithAValueSet.Set(3.14)
counterVecWithOneElement := prometheus.NewCounterVec(prometheus.CounterOpts{}, []string{"foo"})
counterVecWithOneElement.WithLabelValues("bar").Inc()
counterVecWithTwoElements := prometheus.NewCounterVec(prometheus.CounterOpts{}, []string{"foo"})
counterVecWithTwoElements.WithLabelValues("bar").Add(42)
counterVecWithTwoElements.WithLabelValues("baz").Inc()
histogramVecWithOneElement := prometheus.NewHistogramVec(prometheus.HistogramOpts{}, []string{"foo"})
histogramVecWithOneElement.WithLabelValues("bar").Observe(2.7)
scenarios := map[string]struct {
collector prometheus.Collector
panics bool
want float64
}{
"simple counter": {
collector: prometheus.NewCounter(prometheus.CounterOpts{}),
panics: false,
want: 0,
},
"simple gauge": {
collector: prometheus.NewGauge(prometheus.GaugeOpts{}),
panics: false,
want: 0,
},
"simple untyped": {
collector: untypedCollector{},
panics: false,
want: 2001,
},
"simple histogram": {
collector: prometheus.NewHistogram(prometheus.HistogramOpts{}),
panics: true,
},
"simple summary": {
collector: prometheus.NewSummary(prometheus.SummaryOpts{}),
panics: true,
},
"simple gauge with an actual value set": {
collector: gaugeWithAValueSet,
panics: false,
want: 3.14,
},
"counter vec with zero elements": {
collector: prometheus.NewCounterVec(prometheus.CounterOpts{}, nil),
panics: true,
},
"counter vec with one element": {
collector: counterVecWithOneElement,
panics: false,
want: 1,
},
"counter vec with two elements": {
collector: counterVecWithTwoElements,
panics: true,
},
"histogram vec with one element": {
collector: histogramVecWithOneElement,
panics: true,
},
}
for n, s := range scenarios {
t.Run(n, func(t *testing.T) {
defer func() {
r := recover()
if r == nil && s.panics {
t.Error("expected panic")
} else if r != nil && !s.panics {
t.Error("unexpected panic: ", r)
}
// Any other combination is the expected outcome.
}()
if got := ToFloat64(s.collector); got != s.want {
t.Errorf("want %f, got %f", s.want, got)
}
})
}
}
func TestCollectAndCompare(t *testing.T) {
const metadata = `
# HELP some_total A value that represents a counter.
# TYPE some_total counter
`
c := prometheus.NewCounter(prometheus.CounterOpts{
Name: "some_total",
Help: "A value that represents a counter.",
ConstLabels: prometheus.Labels{
"label1": "value1",
},
})
c.Inc()
expected := `
some_total{ label1 = "value1" } 1
`
if err := CollectAndCompare(c, strings.NewReader(metadata+expected), "some_total"); err != nil {
t.Errorf("unexpected collecting result:\n%s", err)
}
}
func TestNoMetricFilter(t *testing.T) {
const metadata = `
# HELP some_total A value that represents a counter.
# TYPE some_total counter
`
c := prometheus.NewCounter(prometheus.CounterOpts{
Name: "some_total",
Help: "A value that represents a counter.",
ConstLabels: prometheus.Labels{
"label1": "value1",
},
})
c.Inc()
expected := `
some_total{label1="value1"} 1
`
if err := CollectAndCompare(c, strings.NewReader(metadata+expected)); err != nil {
t.Errorf("unexpected collecting result:\n%s", err)
}
}
func TestMetricNotFound(t *testing.T) {
const metadata = `
# HELP some_other_metric A value that represents a counter.
# TYPE some_other_metric counter
`
c := prometheus.NewCounter(prometheus.CounterOpts{
Name: "some_total",
Help: "A value that represents a counter.",
ConstLabels: prometheus.Labels{
"label1": "value1",
},
})
c.Inc()
expected := `
some_other_metric{label1="value1"} 1
`
expectedError := `
metric output does not match expectation; want:
# HELP some_other_metric A value that represents a counter.
# TYPE some_other_metric counter
some_other_metric{label1="value1"} 1
got:
# HELP some_total A value that represents a counter.
# TYPE some_total counter
some_total{label1="value1"} 1
`
err := CollectAndCompare(c, strings.NewReader(metadata+expected))
if err == nil {
t.Error("Expected error, got no error.")
}
if err.Error() != expectedError {
t.Errorf("Expected\n%#+v\nGot:\n%#+v\n", expectedError, err.Error())
}
}

@ -0,0 +1,51 @@
// Copyright 2016 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package prometheus
import "time"
// Timer is a helper type to time functions. Use NewTimer to create new
// instances.
type Timer struct {
begin time.Time
observer Observer
}
// NewTimer creates a new Timer. The provided Observer is used to observe a
// duration in seconds. Timer is usually used to time a function call in the
// following way:
// func TimeMe() {
// timer := NewTimer(myHistogram)
// defer timer.ObserveDuration()
// // Do actual work.
// }
func NewTimer(o Observer) *Timer {
return &Timer{
begin: time.Now(),
observer: o,
}
}
// ObserveDuration records the duration passed since the Timer was created with
// NewTimer. It calls the Observe method of the Observer provided during
// construction with the duration in seconds as an argument. ObserveDuration is
// usually called with a defer statement.
//
// Note that this method is only guaranteed to never observe negative durations
// if used with Go1.9+.
func (t *Timer) ObserveDuration() {
if t.observer != nil {
t.observer.Observe(time.Since(t.begin).Seconds())
}
}

@ -0,0 +1,152 @@
// Copyright 2016 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package prometheus
import (
"testing"
dto "github.com/prometheus/client_model/go"
)
func TestTimerObserve(t *testing.T) {
var (
his = NewHistogram(HistogramOpts{Name: "test_histogram"})
sum = NewSummary(SummaryOpts{Name: "test_summary"})
gauge = NewGauge(GaugeOpts{Name: "test_gauge"})
)
func() {
hisTimer := NewTimer(his)
sumTimer := NewTimer(sum)
gaugeTimer := NewTimer(ObserverFunc(gauge.Set))
defer hisTimer.ObserveDuration()
defer sumTimer.ObserveDuration()
defer gaugeTimer.ObserveDuration()
}()
m := &dto.Metric{}
his.Write(m)
if want, got := uint64(1), m.GetHistogram().GetSampleCount(); want != got {
t.Errorf("want %d observations for histogram, got %d", want, got)
}
m.Reset()
sum.Write(m)
if want, got := uint64(1), m.GetSummary().GetSampleCount(); want != got {
t.Errorf("want %d observations for summary, got %d", want, got)
}
m.Reset()
gauge.Write(m)
if got := m.GetGauge().GetValue(); got <= 0 {
t.Errorf("want value > 0 for gauge, got %f", got)
}
}
func TestTimerEmpty(t *testing.T) {
emptyTimer := NewTimer(nil)
emptyTimer.ObserveDuration()
// Do nothing, just demonstrate it works without panic.
}
func TestTimerConditionalTiming(t *testing.T) {
var (
his = NewHistogram(HistogramOpts{
Name: "test_histogram",
})
timeMe = true
m = &dto.Metric{}
)
timedFunc := func() {
timer := NewTimer(ObserverFunc(func(v float64) {
if timeMe {
his.Observe(v)
}
}))
defer timer.ObserveDuration()
}
timedFunc() // This will time.
his.Write(m)
if want, got := uint64(1), m.GetHistogram().GetSampleCount(); want != got {
t.Errorf("want %d observations for histogram, got %d", want, got)
}
timeMe = false
timedFunc() // This will not time again.
m.Reset()
his.Write(m)
if want, got := uint64(1), m.GetHistogram().GetSampleCount(); want != got {
t.Errorf("want %d observations for histogram, got %d", want, got)
}
}
func TestTimerByOutcome(t *testing.T) {
var (
his = NewHistogramVec(
HistogramOpts{Name: "test_histogram"},
[]string{"outcome"},
)
outcome = "foo"
m = &dto.Metric{}
)
timedFunc := func() {
timer := NewTimer(ObserverFunc(func(v float64) {
his.WithLabelValues(outcome).Observe(v)
}))
defer timer.ObserveDuration()
if outcome == "foo" {
outcome = "bar"
return
}
outcome = "foo"
}
timedFunc()
his.WithLabelValues("foo").(Histogram).Write(m)
if want, got := uint64(0), m.GetHistogram().GetSampleCount(); want != got {
t.Errorf("want %d observations for 'foo' histogram, got %d", want, got)
}
m.Reset()
his.WithLabelValues("bar").(Histogram).Write(m)
if want, got := uint64(1), m.GetHistogram().GetSampleCount(); want != got {
t.Errorf("want %d observations for 'bar' histogram, got %d", want, got)
}
timedFunc()
m.Reset()
his.WithLabelValues("foo").(Histogram).Write(m)
if want, got := uint64(1), m.GetHistogram().GetSampleCount(); want != got {
t.Errorf("want %d observations for 'foo' histogram, got %d", want, got)
}
m.Reset()
his.WithLabelValues("bar").(Histogram).Write(m)
if want, got := uint64(1), m.GetHistogram().GetSampleCount(); want != got {
t.Errorf("want %d observations for 'bar' histogram, got %d", want, got)
}
timedFunc()
m.Reset()
his.WithLabelValues("foo").(Histogram).Write(m)
if want, got := uint64(1), m.GetHistogram().GetSampleCount(); want != got {
t.Errorf("want %d observations for 'foo' histogram, got %d", want, got)
}
m.Reset()
his.WithLabelValues("bar").(Histogram).Write(m)
if want, got := uint64(2), m.GetHistogram().GetSampleCount(); want != got {
t.Errorf("want %d observations for 'bar' histogram, got %d", want, got)
}
}

@ -13,108 +13,12 @@
package prometheus package prometheus
// Untyped is a Metric that represents a single numerical value that can
// arbitrarily go up and down.
//
// An Untyped metric works the same as a Gauge. The only difference is that to
// no type information is implied.
//
// To create Untyped instances, use NewUntyped.
type Untyped interface {
Metric
Collector
// Set sets the Untyped metric to an arbitrary value.
Set(float64)
// Inc increments the Untyped metric by 1.
Inc()
// Dec decrements the Untyped metric by 1.
Dec()
// Add adds the given value to the Untyped metric. (The value can be
// negative, resulting in a decrease.)
Add(float64)
// Sub subtracts the given value from the Untyped metric. (The value can
// be negative, resulting in an increase.)
Sub(float64)
}
// UntypedOpts is an alias for Opts. See there for doc comments. // UntypedOpts is an alias for Opts. See there for doc comments.
type UntypedOpts Opts type UntypedOpts Opts
// NewUntyped creates a new Untyped metric from the provided UntypedOpts. // UntypedFunc works like GaugeFunc but the collected metric is of type
func NewUntyped(opts UntypedOpts) Untyped { // "Untyped". UntypedFunc is useful to mirror an external metric of unknown
return newValue(NewDesc( // type.
BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
opts.Help,
nil,
opts.ConstLabels,
), UntypedValue, 0)
}
// UntypedVec is a Collector that bundles a set of Untyped metrics that all
// share the same Desc, but have different values for their variable
// labels. This is used if you want to count the same thing partitioned by
// various dimensions. Create instances with NewUntypedVec.
type UntypedVec struct {
*MetricVec
}
// NewUntypedVec creates a new UntypedVec based on the provided UntypedOpts and
// partitioned by the given label names. At least one label name must be
// provided.
func NewUntypedVec(opts UntypedOpts, labelNames []string) *UntypedVec {
desc := NewDesc(
BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
opts.Help,
labelNames,
opts.ConstLabels,
)
return &UntypedVec{
MetricVec: newMetricVec(desc, func(lvs ...string) Metric {
return newValue(desc, UntypedValue, 0, lvs...)
}),
}
}
// GetMetricWithLabelValues replaces the method of the same name in
// MetricVec. The difference is that this method returns an Untyped and not a
// Metric so that no type conversion is required.
func (m *UntypedVec) GetMetricWithLabelValues(lvs ...string) (Untyped, error) {
metric, err := m.MetricVec.GetMetricWithLabelValues(lvs...)
if metric != nil {
return metric.(Untyped), err
}
return nil, err
}
// GetMetricWith replaces the method of the same name in MetricVec. The
// difference is that this method returns an Untyped and not a Metric so that no
// type conversion is required.
func (m *UntypedVec) GetMetricWith(labels Labels) (Untyped, error) {
metric, err := m.MetricVec.GetMetricWith(labels)
if metric != nil {
return metric.(Untyped), err
}
return nil, err
}
// WithLabelValues works as GetMetricWithLabelValues, but panics where
// GetMetricWithLabelValues would have returned an error. By not returning an
// error, WithLabelValues allows shortcuts like
// myVec.WithLabelValues("404", "GET").Add(42)
func (m *UntypedVec) WithLabelValues(lvs ...string) Untyped {
return m.MetricVec.WithLabelValues(lvs...).(Untyped)
}
// With works as GetMetricWith, but panics where GetMetricWithLabels would have
// returned an error. By not returning an error, With allows shortcuts like
// myVec.With(Labels{"code": "404", "method": "GET"}).Add(42)
func (m *UntypedVec) With(labels Labels) Untyped {
return m.MetricVec.With(labels).(Untyped)
}
// UntypedFunc is an Untyped whose value is determined at collect time by
// calling a provided function.
// //
// To create UntypedFunc instances, use NewUntypedFunc. // To create UntypedFunc instances, use NewUntypedFunc.
type UntypedFunc interface { type UntypedFunc interface {
