Commit 9bba8a6e authored by Annanay

Merge branch 'master' into appender-context



Signed-off-by: Annanay <annanayagarwal@gmail.com>
parents 89129cd3 01e3bfcd
@@ -41,7 +41,7 @@ jobs:
GOOPTS: "-p 2"
GOMAXPROCS: "2"
- prometheus/check_proto:
version: "3.11.4"
version: "3.12.3"
- prometheus/store_artifact:
file: prometheus
- prometheus/store_artifact:
......
@@ -8,7 +8,7 @@
* `prometheus-mixin`: @beorn7
* `storage`
* `remote`: @csmarchbanks, @cstyan, @bwplotka
* `tsdb`: @codesome, @krasi-georgiev
* `tsdb`: @codesome, @krasi-georgiev, @bwplotka
* `web`
* `ui`: @juliusv
* `Makefile` and related build configuration: @simonpasquier, @SuperQ
......
@@ -30,7 +30,6 @@ import (
"runtime"
"strings"
"sync"
"sync/atomic"
"syscall"
"time"
@@ -47,6 +46,7 @@ import (
"github.com/prometheus/common/version"
jcfg "github.com/uber/jaeger-client-go/config"
jprom "github.com/uber/jaeger-lib/metrics/prometheus"
"go.uber.org/atomic"
kingpin "gopkg.in/alecthomas/kingpin.v2"
"k8s.io/klog"
@@ -696,7 +696,13 @@ func main() {
return errors.Wrapf(err, "opening storage failed")
}
level.Info(logger).Log("fs_type", prom_runtime.Statfs(cfg.localStoragePath))
switch fsType := prom_runtime.Statfs(cfg.localStoragePath); fsType {
case "NFS_SUPER_MAGIC":
level.Warn(logger).Log("fs_type", fsType, "msg", "This filesystem is not supported and may lead to data corruption and data loss. Please carefully read https://prometheus.io/docs/prometheus/latest/storage/ to learn more about supported filesystems.")
default:
level.Info(logger).Log("fs_type", fsType)
}
level.Info(logger).Log("msg", "TSDB started")
level.Debug(logger).Log("msg", "TSDB options",
"MinBlockDuration", cfg.tsdb.MinBlockDuration,
@@ -801,18 +807,18 @@ func openDBWithMetrics(dir string, logger log.Logger, reg prometheus.Registerer,
}
type safePromQLNoStepSubqueryInterval struct {
value int64
value atomic.Int64
}
func durationToInt64Millis(d time.Duration) int64 {
return int64(d / time.Millisecond)
}
func (i *safePromQLNoStepSubqueryInterval) Set(ev model.Duration) {
atomic.StoreInt64(&i.value, durationToInt64Millis(time.Duration(ev)))
i.value.Store(durationToInt64Millis(time.Duration(ev)))
}
func (i *safePromQLNoStepSubqueryInterval) Get(int64) int64 {
return atomic.LoadInt64(&i.value)
return i.value.Load()
}
func reloadConfig(filename string, logger log.Logger, noStepSuqueryInterval *safePromQLNoStepSubqueryInterval, rls ...func(*config.Config) error) (err error) {
......
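The hunk above replaces the free functions of the standard `sync/atomic` package with the typed values from `go.uber.org/atomic`. A minimal standalone sketch of the pattern (illustrative code, not the Prometheus source itself):

```go
package main

import (
	"fmt"

	"go.uber.org/atomic"
)

type safeInterval struct {
	// The zero value of atomic.Int64 is ready to use, and the field can
	// only be accessed through atomic methods, so an accidental plain
	// read such as `s.millis` no longer type-checks as an int64.
	millis atomic.Int64
}

func (s *safeInterval) Set(v int64) { s.millis.Store(v) }
func (s *safeInterval) Get() int64  { return s.millis.Load() }

func main() {
	var s safeInterval
	s.Set(5000)
	fmt.Println(s.Get()) // 5000
}
```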
@@ -363,15 +363,16 @@ func printBlocks(blocks []tsdb.BlockReader, humanReadable bool) {
tw := tabwriter.NewWriter(os.Stdout, 0, 0, 2, ' ', 0)
defer tw.Flush()
fmt.Fprintln(tw, "BLOCK ULID\tMIN TIME\tMAX TIME\tNUM SAMPLES\tNUM CHUNKS\tNUM SERIES")
fmt.Fprintln(tw, "BLOCK ULID\tMIN TIME\tMAX TIME\tDURATION\tNUM SAMPLES\tNUM CHUNKS\tNUM SERIES")
for _, b := range blocks {
meta := b.Meta()
fmt.Fprintf(tw,
"%v\t%v\t%v\t%v\t%v\t%v\n",
"%v\t%v\t%v\t%v\t%v\t%v\t%v\n",
meta.ULID,
getFormatedTime(meta.MinTime, humanReadable),
getFormatedTime(meta.MaxTime, humanReadable),
time.Duration(meta.MaxTime-meta.MinTime)*time.Millisecond,
meta.Stats.NumSamples,
meta.Stats.NumChunks,
meta.Stats.NumSeries,
......
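The new DURATION column is derived directly from the block's min/max timestamps, which are in milliseconds. A standalone illustration of the arithmetic (the timestamps here are made up):

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	minT, maxT := int64(1597000000000), int64(1597007200000) // millis
	// Same conversion as the hunk above: 7,200,000 ms -> 2h0m0s.
	fmt.Println(time.Duration(maxT-minT) * time.Millisecond)
}
```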
@@ -18,8 +18,13 @@ import (
"testing"
"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute"
"go.uber.org/goleak"
)
func TestMain(m *testing.M) {
goleak.VerifyTestMain(m)
}
func TestMapFromVMWithEmptyTags(t *testing.T) {
id := "test"
name := "name"
......
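`goleak.VerifyTestMain` runs the package's tests and then fails the binary if any unexpected goroutine is still alive, which is why the discovery tests below are also reworked to shut their goroutines down deterministically. A hedged sketch of the API (the ignored function name is only an example of the allowlisting option):

```go
package example

import (
	"testing"

	"go.uber.org/goleak"
)

func TestMain(m *testing.M) {
	// After m.Run() finishes, goleak snapshots live goroutines and fails
	// the test binary if a non-allowlisted one remains. Known-benign
	// goroutines can be allowlisted by their top stack frame.
	goleak.VerifyTestMain(m,
		goleak.IgnoreTopFunction("k8s.io/klog.(*loggingT).flushDaemon"),
	)
}
```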
@@ -138,5 +138,10 @@ func (c *ServiceDiscoveryConfig) Validate() error {
return errors.New("empty or null section in static_configs")
}
}
for _, cfg := range c.TritonSDConfigs {
if cfg == nil {
return errors.New("empty or null section in triton_sd_configs")
}
}
return nil
}
// Copyright 2020 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package config
import (
"fmt"
"reflect"
"strings"
"testing"
"github.com/prometheus/prometheus/util/testutil"
"gopkg.in/yaml.v2"
)
func TestForNilSDConfig(t *testing.T) {
// Get all the yaml field names of the ServiceDiscoveryConfig struct.
s := reflect.ValueOf(ServiceDiscoveryConfig{})
configType := s.Type()
n := s.NumField()
fieldsSlice := make([]string, n)
for i := 0; i < n; i++ {
field := configType.Field(i)
tag := field.Tag.Get("yaml")
tag = strings.Split(tag, ",")[0]
fieldsSlice = append(fieldsSlice, tag)
}
// Unmarshal all possible yaml keys and check that validation fails on a
// nil SD config.
for _, f := range fieldsSlice {
if f == "" {
continue
}
t.Run(f, func(t *testing.T) {
c := &ServiceDiscoveryConfig{}
err := yaml.Unmarshal([]byte(fmt.Sprintf(`
---
%s:
-
`, f)), c)
testutil.Ok(t, err)
err = c.Validate()
testutil.NotOk(t, err)
testutil.Equals(t, fmt.Sprintf("empty or null section in %s", f), err.Error())
})
}
}
@@ -26,8 +26,13 @@ import (
"github.com/prometheus/common/model"
"github.com/prometheus/prometheus/discovery/targetgroup"
"github.com/prometheus/prometheus/util/testutil"
"go.uber.org/goleak"
)
func TestMain(m *testing.M) {
goleak.VerifyTestMain(m)
}
func TestConfiguredService(t *testing.T) {
conf := &SDConfig{
Services: []string{"configuredServiceName"}}
@@ -283,10 +288,14 @@ func TestAllServices(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
ch := make(chan []*targetgroup.Group)
go d.Run(ctx, ch)
go func() {
d.Run(ctx, ch)
close(ch)
}()
checkOneTarget(t, <-ch)
checkOneTarget(t, <-ch)
cancel()
<-ch
}
// Watch only the test service.
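The pattern adopted in these tests — wrap `Run` in a goroutine that closes the channel when it returns, then drain after `cancel()` — makes the test block until the discoverer has actually exited, which is what lets `goleak` pass. A self-contained sketch of the same idea (`run` is a stand-in, not the Consul SD code):

```go
package main

import (
	"context"
	"fmt"
)

// run stands in for a discoverer's Run method: it sends updates until
// its context is cancelled.
func run(ctx context.Context, ch chan<- string) {
	for {
		select {
		case <-ctx.Done():
			return
		case ch <- "update":
		}
	}
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	ch := make(chan string)
	go func() {
		run(ctx, ch)
		close(ch) // only reached once run has fully returned
	}()
	fmt.Println(<-ch) // consume one update, as checkOneTarget does
	cancel()
	for range ch {
	} // drains until close, i.e. until the goroutine is gone
}
```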
@@ -319,9 +328,13 @@ func TestAllOptions(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
ch := make(chan []*targetgroup.Group)
go d.Run(ctx, ch)
go func() {
d.Run(ctx, ch)
close(ch)
}()
checkOneTarget(t, <-ch)
cancel()
<-ch
}
func TestGetDatacenterShouldReturnError(t *testing.T) {
......
@@ -35,7 +35,10 @@ import (
const (
resolvConf = "/etc/resolv.conf"
dnsNameLabel = model.MetaLabelPrefix + "dns_name"
dnsNameLabel = model.MetaLabelPrefix + "dns_name"
dnsSrvRecordPrefix = model.MetaLabelPrefix + "dns_srv_record_"
dnsSrvRecordTargetLabel = dnsSrvRecordPrefix + "target"
dnsSrvRecordPortLabel = dnsSrvRecordPrefix + "port"
// Constants for instrumentation.
namespace = "prometheus"
@@ -183,9 +186,13 @@ func (d *Discovery) refreshOne(ctx context.Context, name string, ch chan<- *targ
}
for _, record := range response.Answer {
var target model.LabelValue
var target, dnsSrvRecordTarget, dnsSrvRecordPort model.LabelValue
switch addr := record.(type) {
case *dns.SRV:
dnsSrvRecordTarget = model.LabelValue(addr.Target)
dnsSrvRecordPort = model.LabelValue(fmt.Sprintf("%d", addr.Port))
// Remove the final dot from rooted DNS names to make them look more usual.
addr.Target = strings.TrimRight(addr.Target, ".")
@@ -199,8 +206,10 @@ func (d *Discovery) refreshOne(ctx context.Context, name string, ch chan<- *targ
continue
}
tg.Targets = append(tg.Targets, model.LabelSet{
model.AddressLabel: target,
dnsNameLabel: model.LabelValue(name),
model.AddressLabel: target,
dnsNameLabel: model.LabelValue(name),
dnsSrvRecordTargetLabel: dnsSrvRecordTarget,
dnsSrvRecordPortLabel: dnsSrvRecordPort,
})
}
......
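Put together, each SRV answer now yields two extra meta labels alongside the address. A minimal illustration of the values involved (constants re-declared locally for the sketch; this is not the discovery code itself):

```go
package main

import (
	"fmt"

	"github.com/miekg/dns"
	"github.com/prometheus/common/model"
)

const (
	dnsSrvRecordPrefix      = model.MetaLabelPrefix + "dns_srv_record_"
	dnsSrvRecordTargetLabel = dnsSrvRecordPrefix + "target"
	dnsSrvRecordPortLabel   = dnsSrvRecordPrefix + "port"
)

func main() {
	rec := &dns.SRV{Target: "db1.example.com.", Port: 3306}
	labels := model.LabelSet{
		// The SRV target keeps its trailing dot; only __address__ is trimmed.
		dnsSrvRecordTargetLabel: model.LabelValue(rec.Target),
		dnsSrvRecordPortLabel:   model.LabelValue(fmt.Sprintf("%d", rec.Port)),
	}
	// Prints both __meta_dns_srv_record_* labels: target "db1.example.com.",
	// port "3306" — matching the expectations in the test hunks below.
	fmt.Println(labels)
}
```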
@@ -22,6 +22,7 @@ import (
"github.com/go-kit/kit/log"
"github.com/miekg/dns"
"go.uber.org/goleak"
"gopkg.in/yaml.v2"
"github.com/prometheus/common/model"
@@ -29,6 +30,10 @@ import (
"github.com/prometheus/prometheus/util/testutil"
)
func TestMain(m *testing.M) {
goleak.VerifyTestMain(m)
}
func TestDNS(t *testing.T) {
testCases := []struct {
name string
@@ -70,7 +75,12 @@ func TestDNS(t *testing.T) {
{
Source: "web.example.com.",
Targets: []model.LabelSet{
{"__address__": "192.0.2.2:80", "__meta_dns_name": "web.example.com."},
{
"__address__": "192.0.2.2:80",
"__meta_dns_name": "web.example.com.",
"__meta_dns_srv_record_target": "",
"__meta_dns_srv_record_port": "",
},
},
},
},
@@ -95,7 +105,12 @@ func TestDNS(t *testing.T) {
{
Source: "web.example.com.",
Targets: []model.LabelSet{
{"__address__": "[::1]:80", "__meta_dns_name": "web.example.com."},
{
"__address__": "[::1]:80",
"__meta_dns_name": "web.example.com.",
"__meta_dns_srv_record_target": "",
"__meta_dns_srv_record_port": "",
},
},
},
},
@@ -120,8 +135,18 @@ func TestDNS(t *testing.T) {
{
Source: "_mysql._tcp.db.example.com.",
Targets: []model.LabelSet{
{"__address__": "db1.example.com:3306", "__meta_dns_name": "_mysql._tcp.db.example.com."},
{"__address__": "db2.example.com:3306", "__meta_dns_name": "_mysql._tcp.db.example.com."},
{
"__address__": "db1.example.com:3306",
"__meta_dns_name": "_mysql._tcp.db.example.com.",
"__meta_dns_srv_record_target": "db1.example.com.",
"__meta_dns_srv_record_port": "3306",
},
{
"__address__": "db2.example.com:3306",
"__meta_dns_name": "_mysql._tcp.db.example.com.",
"__meta_dns_srv_record_target": "db2.example.com.",
"__meta_dns_srv_record_port": "3306",
},
},
},
},
@@ -145,7 +170,12 @@ func TestDNS(t *testing.T) {
{
Source: "_mysql._tcp.db.example.com.",
Targets: []model.LabelSet{
{"__address__": "db1.example.com:3306", "__meta_dns_name": "_mysql._tcp.db.example.com."},
{
"__address__": "db1.example.com:3306",
"__meta_dns_name": "_mysql._tcp.db.example.com.",
"__meta_dns_srv_record_target": "db1.example.com.",
"__meta_dns_srv_record_port": "3306",
},
},
},
},
......
@@ -57,42 +57,68 @@ func (d *Discovery) refreshServices(ctx context.Context) ([]*targetgroup.Group,
}
for _, s := range services {
for _, e := range s.Endpoint.Ports {
if e.Protocol != swarm.PortConfigProtocolTCP {
continue
commonLabels := map[string]string{
swarmLabelServiceID: s.ID,
swarmLabelServiceName: s.Spec.Name,
swarmLabelServiceTaskContainerHostname: s.Spec.TaskTemplate.ContainerSpec.Hostname,
swarmLabelServiceTaskContainerImage: s.Spec.TaskTemplate.ContainerSpec.Image,
}
commonLabels[swarmLabelServiceMode] = getServiceValueMode(s)
if s.UpdateStatus != nil {
commonLabels[swarmLabelServiceUpdatingStatus] = string(s.UpdateStatus.State)
}
for k, v := range s.Spec.Labels {
ln := strutil.SanitizeLabelName(k)
commonLabels[swarmLabelServiceLabelPrefix+ln] = v
}
for _, p := range s.Endpoint.VirtualIPs {
var added bool
ip, _, err := net.ParseCIDR(p.Addr)
if err != nil {
return nil, fmt.Errorf("error while parsing address %s: %w", p.Addr, err)
}
for _, p := range s.Endpoint.VirtualIPs {
for _, e := range s.Endpoint.Ports {
if e.Protocol != swarm.PortConfigProtocolTCP {
continue
}
labels := model.LabelSet{
swarmLabelServiceEndpointPortName: model.LabelValue(e.Name),
swarmLabelServiceEndpointPortPublishMode: model.LabelValue(e.PublishMode),
swarmLabelServiceID: model.LabelValue(s.ID),
swarmLabelServiceName: model.LabelValue(s.Spec.Name),
swarmLabelServiceTaskContainerHostname: model.LabelValue(s.Spec.TaskTemplate.ContainerSpec.Hostname),
swarmLabelServiceTaskContainerImage: model.LabelValue(s.Spec.TaskTemplate.ContainerSpec.Image),
}
labels[swarmLabelServiceMode] = model.LabelValue(getServiceValueMode(s))
if s.UpdateStatus != nil {
labels[swarmLabelServiceUpdatingStatus] = model.LabelValue(s.UpdateStatus.State)
for k, v := range commonLabels {
labels[model.LabelName(k)] = model.LabelValue(v)
}
for k, v := range s.Spec.Labels {
ln := strutil.SanitizeLabelName(k)
labels[model.LabelName(swarmLabelServiceLabelPrefix+ln)] = model.LabelValue(v)
for k, v := range networkLabels[p.NetworkID] {
labels[model.LabelName(k)] = model.LabelValue(v)
}
ip, _, err := net.ParseCIDR(p.Addr)
if err != nil {
return nil, fmt.Errorf("error while parsing address %s: %w", p.Addr, err)
}
addr := net.JoinHostPort(ip.String(), strconv.FormatUint(uint64(e.PublishedPort), 10))
labels[model.AddressLabel] = model.LabelValue(addr)
tg.Targets = append(tg.Targets, labels)
added = true
}
if !added {
labels := model.LabelSet{}
for k, v := range commonLabels {
labels[model.LabelName(k)] = model.LabelValue(v)
}
for k, v := range networkLabels[p.NetworkID] {
labels[model.LabelName(k)] = model.LabelValue(v)
}
addr := net.JoinHostPort(ip.String(), fmt.Sprintf("%d", d.port))
labels[model.AddressLabel] = model.LabelValue(addr)
tg.Targets = append(tg.Targets, labels)
}
}
......
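The refactored loop is hard to follow in diff form. Its shape is roughly the following (paraphrased control flow with simplified stand-in types, not the actual Swarm SD source): per-service labels are built once, then for every virtual IP one target is emitted per published TCP port, and if no port produced a target, a single fallback target is emitted on the configured default port.

```go
package main

import (
	"fmt"
	"net"
)

type port struct {
	published string
	tcp       bool
}

func cloneWith(common map[string]string, addr string) map[string]string {
	t := map[string]string{"__address__": addr}
	for k, v := range common {
		t[k] = v
	}
	return t
}

func main() {
	common := map[string]string{"service": "web"} // per-service labels, built once
	vips := []string{"10.0.0.3/24"}
	ports := []port{{published: "8080", tcp: true}, {published: "53", tcp: false}}
	defaultPort := "80" // the SD config's `port` parameter

	for _, cidr := range vips {
		ip, _, err := net.ParseCIDR(cidr)
		if err != nil {
			panic(err)
		}
		added := false
		for _, p := range ports {
			if !p.tcp {
				continue // non-TCP ports are still skipped
			}
			fmt.Println(cloneWith(common, net.JoinHostPort(ip.String(), p.published)))
			added = true
		}
		if !added {
			// New behavior: services without published TCP ports now get
			// one target on the default port instead of being dropped.
			fmt.Println(cloneWith(common, net.JoinHostPort(ip.String(), defaultPort)))
		}
	}
}
```

The tasks diff that follows applies the same added/fallback pattern per network attachment.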
@@ -106,12 +106,19 @@ func (d *Discovery) refreshTasks(ctx context.Context) ([]*targetgroup.Group, err
tg.Targets = append(tg.Targets, labels)
}
for _, p := range servicePorts[s.ServiceID] {
if p.Protocol != swarm.PortConfigProtocolTCP {
continue
}
for _, network := range s.NetworksAttachments {
for _, address := range network.Addresses {
for _, network := range s.NetworksAttachments {
for _, address := range network.Addresses {
var added bool
ip, _, err := net.ParseCIDR(address)
if err != nil {
return nil, fmt.Errorf("error while parsing address %s: %w", address, err)
}
for _, p := range servicePorts[s.ServiceID] {
if p.Protocol != swarm.PortConfigProtocolTCP {
continue
}
labels := model.LabelSet{
swarmLabelTaskPortMode: model.LabelValue(p.PublishMode),
}
@@ -124,13 +131,26 @@ func (d *Discovery) refreshTasks(ctx context.Context) ([]*targetgroup.Group, err
labels[model.LabelName(k)] = model.LabelValue(v)
}
ip, _, err := net.ParseCIDR(address)
if err != nil {
return nil, fmt.Errorf("error while parsing address %s: %w", address, err)
}
addr := net.JoinHostPort(ip.String(), strconv.FormatUint(uint64(p.PublishedPort), 10))
labels[model.AddressLabel] = model.LabelValue(addr)
tg.Targets = append(tg.Targets, labels)
added = true
}
if !added {
labels := model.LabelSet{}
for k, v := range commonLabels {
labels[model.LabelName(k)] = model.LabelValue(v)
}
for k, v := range networkLabels[network.Network.ID] {
labels[model.LabelName(k)] = model.LabelValue(v)
}
addr := net.JoinHostPort(ip.String(), fmt.Sprintf("%d", d.port))
labels[model.AddressLabel] = model.LabelValue(addr)
tg.Targets = append(tg.Targets, labels)
}
}
......
@@ -26,11 +26,16 @@ import (
"time"
"github.com/prometheus/common/model"
"go.uber.org/goleak"
"github.com/prometheus/prometheus/discovery/targetgroup"
"github.com/prometheus/prometheus/util/testutil"
)
func TestMain(m *testing.M) {
goleak.VerifyTestMain(m)
}
const defaultWait = time.Second
type testRunner struct {
......
@@ -29,6 +29,10 @@ import (
"github.com/prometheus/prometheus/util/testutil"
)
func TestMain(m *testing.M) {
testutil.TolerantVerifyLeak(m)
}
// makeDiscovery creates a kubernetes.Discovery instance for testing.
func makeDiscovery(role Role, nsDiscovery NamespaceDiscovery, objects ...runtime.Object) (*Discovery, kubernetes.Interface) {
clientset := fake.NewSimpleClientset(objects...)
......
@@ -25,7 +25,7 @@ import (
"time"
"github.com/go-kit/kit/log"
"github.com/prometheus/client_golang/prometheus/testutil"
client_testutil "github.com/prometheus/client_golang/prometheus/testutil"
common_config "github.com/prometheus/common/config"
"github.com/prometheus/common/model"
"github.com/prometheus/prometheus/config"
@@ -33,9 +33,14 @@ import (
"github.com/prometheus/prometheus/discovery/consul"
"github.com/prometheus/prometheus/discovery/file"
"github.com/prometheus/prometheus/discovery/targetgroup"
"github.com/prometheus/prometheus/util/testutil"
"gopkg.in/yaml.v2"
)
func TestMain(m *testing.M) {
testutil.TolerantVerifyLeak(m)
}
// TestTargetUpdatesOrder checks that the target updates are received in the expected order.
func TestTargetUpdatesOrder(t *testing.T) {
@@ -984,7 +989,7 @@ func TestGaugeFailedConfigs(t *testing.T) {
discoveryManager.ApplyConfig(c)
<-discoveryManager.SyncCh()
failedCount := testutil.ToFloat64(failedConfigs)
failedCount := client_testutil.ToFloat64(failedConfigs)
if failedCount != 3 {
t.Fatalf("Expected to have 3 failed configs, got: %v", failedCount)
}
@@ -1004,7 +1009,7 @@ func TestGaugeFailedConfigs(t *testing.T) {
discoveryManager.ApplyConfig(c)
<-discoveryManager.SyncCh()
failedCount = testutil.ToFloat64(failedConfigs)
failedCount = client_testutil.ToFloat64(failedConfigs)
if failedCount != 0 {
t.Fatalf("Expected to get no failed config, got: %v", failedCount)
}
......
@@ -20,11 +20,16 @@ import (
"time"
"github.com/prometheus/common/model"
"go.uber.org/goleak"
"github.com/prometheus/prometheus/discovery/targetgroup"
"github.com/prometheus/prometheus/util/testutil"
)
func TestMain(m *testing.M) {
goleak.VerifyTestMain(m)
}
func TestRefresh(t *testing.T) {
tg1 := []*targetgroup.Group{
{
......
@@ -18,8 +18,13 @@ import (
"time"
"github.com/prometheus/common/model"
"go.uber.org/goleak"
)
func TestMain(m *testing.M) {
goleak.VerifyTestMain(m)
}
func TestNewDiscoveryError(t *testing.T) {
_, err := NewDiscovery(
[]string{"unreachable.test"},
......
@@ -459,7 +459,10 @@ One of the following roles can be configured to discover targets:
#### `services`
The `services` role is used to discover [Swarm services](https://docs.docker.com/engine/swarm/key-concepts/#services-and-tasks).
The `services` role discovers all [Swarm services](https://docs.docker.com/engine/swarm/key-concepts/#services-and-tasks)
and exposes their ports as targets. For each published port of a service, a
single target is generated. If a service has no published ports, a target per
service is created using the `port` parameter defined in the SD configuration.
Available meta labels:
@@ -481,7 +484,10 @@ Available meta labels:
#### `tasks`
The `tasks` role is used to discover [Swarm tasks](https://docs.docker.com/engine/swarm/key-concepts/#services-and-tasks).
The `tasks` role discovers all [Swarm tasks](https://docs.docker.com/engine/swarm/key-concepts/#services-and-tasks)
and exposes their ports as targets. For each published port of a task, a single
target is generated. If a task has no published ports, a target per task is
created using the `port` parameter defined in the SD configuration.