Documentation ¶
Overview ¶
Package base defines Iter8's experiment, task and metric data structures. It contains the core logic for running an experiment.
Index ¶
- Constants
- Variables
- func BoolPointer(b bool) *bool
- func CompletePath(prefix string, suffix string) string
- func NormalizeMetricName(m string) (string, error)
- func RunExperiment(driver Driver) error
- func SetupWithMock(t *testing.T)
- func StringPointer(s string) *string
- func Uniq(list interface{}) []interface{}
- type AggregationType
- type CollectDatabaseTemplate
- type Driver
- type Experiment
- type ExperimentResult
- type ExperimentSpec
- type HistBucket
- type Insights
- type Metric
- type MetricMeta
- type MetricType
- type Params
- type SLO
- type Task
- type TaskMeta
Constants ¶
const (
// CounterMetricType corresponds to Prometheus Counter metric type
CounterMetricType MetricType = "Counter"
// GaugeMetricType corresponds to Prometheus Gauge metric type
GaugeMetricType MetricType = "Gauge"
// HistogramMetricType corresponds to a Histogram metric type
HistogramMetricType MetricType = "Histogram"
// SampleMetricType corresponds to a Sample metric type
SampleMetricType MetricType = "Sample"
// CountAggregator corresponds to aggregation of type count
CountAggregator AggregationType = "count"
// MeanAggregator corresponds to aggregation of type mean
MeanAggregator AggregationType = "mean"
// StdDevAggregator corresponds to aggregation of type stddev
StdDevAggregator AggregationType = "stddev"
// MinAggregator corresponds to aggregation of type min
MinAggregator AggregationType = "min"
// MaxAggregator corresponds to aggregation of type max
MaxAggregator AggregationType = "max"
// PercentileAggregator corresponds to aggregation of type percentile
PercentileAggregator AggregationType = "percentile"
// PercentileAggregatorPrefix corresponds to prefix for percentiles
PercentileAggregatorPrefix = "p"
)
const (
// AssessTaskName is the name of the task which assesses app versions
AssessTaskName = "assess-app-versions"
)
const (
// CollectDatabaseTaskName is the name of this task which collects metrics from a database.
CollectDatabaseTaskName = "collect-metrics-database"
)
const (
// CollectGRPCTaskName is the name of this task which performs load generation and metrics collection for gRPC services.
CollectGRPCTaskName = "gen-load-and-collect-metrics-grpc"
)
const (
// CollectHTTPTaskName is the name of this task which performs load generation and metrics collection for HTTP services.
CollectHTTPTaskName = "gen-load-and-collect-metrics-http"
)
const (
// RunTaskName is the name of the run task, which runs a shell script
RunTaskName = "run"
)
Variables ¶
var MajorMinor = "v0.9"
MajorMinor is the major.minor version of Iter8. Set this manually whenever the major or minor version changes.
Functions ¶
func BoolPointer ¶ added in v0.9.3
func BoolPointer(b bool) *bool
BoolPointer takes a bool as input, creates a new variable with the input value, and returns a pointer to the variable
func CompletePath ¶
func CompletePath(prefix string, suffix string) string
CompletePath is a helper function for converting file paths, specified relative to the caller of this function, into absolute ones. CompletePath is useful in tests and enables deriving the absolute path of experiment YAML files.
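A minimal usage sketch; the relative prefix and file name below are illustrative, and the base package is assumed to be imported as base:

// absolute path of a test file, derived relative to the caller's source file
filePath := base.CompletePath("../", "testdata/experiment.yaml")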
func NormalizeMetricName ¶ added in v0.8.29
func NormalizeMetricName(m string) (string, error)
NormalizeMetricName normalizes percentile values in metric names
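A small sketch; the metric name below is hypothetical, and the exact normalized form depends on the percentile handling in this function:

// normalize a metric name that embeds a percentile value
nm, err := base.NormalizeMetricName("latency-p95.0")
if err != nil {
    // handle an invalid percentile in the metric name
}
_ = nm // normalized metric name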
func RunExperiment ¶ added in v0.9.3
func RunExperiment(driver Driver) error
RunExperiment runs an experiment
func SetupWithMock ¶ added in v0.9.3
func SetupWithMock(t *testing.T)
SetupWithMock mocks an HTTP endpoint and registers a cleanup function
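A minimal test sketch; the test name is illustrative:

func TestWithMockedEndpoint(t *testing.T) {
    // mock the HTTP endpoint; the cleanup function is registered with t and runs when the test ends
    base.SetupWithMock(t)
    // ... exercise code that issues HTTP requests against the mocked endpoint ...
}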
func StringPointer ¶ added in v0.8.8
func StringPointer(s string) *string
StringPointer takes a string as input, creates a new variable with the input value, and returns a pointer to the variable
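A short sketch showing the pointer helpers populating optional (pointer-valued) fields; the Metric literal is illustrative:

desc := base.StringPointer("number of errors") // *string holding a copy of the input
flag := base.BoolPointer(true)                 // *bool holding a copy of the input
m := base.Metric{Name: "error-count", Type: "counter", Description: desc}
_, _ = m, flag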
Types ¶
type AggregationType ¶ added in v0.8.29
type AggregationType string
AggregationType identifies the type of the metric aggregator.
type CollectDatabaseTemplate ¶ added in v0.9.3
type CollectDatabaseTemplate struct {
// Url is the URL of the database endpoint queried for metrics
Url string `json:"url" yaml:"url"`
// Headers are the HTTP request headers used when querying the database
Headers map[string]string `json:"headers" yaml:"headers"`
// Provider is the name of the metrics provider
Provider string `json:"provider" yaml:"provider"`
// Method is the HTTP method used when querying the database
Method string `json:"method" yaml:"method"`
// Metrics is the list of metrics to be collected
Metrics []Metric `json:"metrics" yaml:"metrics"`
}
type Driver ¶ added in v0.9.3
type Driver interface {
// ReadSpec reads the experiment spec
ReadSpec() (ExperimentSpec, error)
// ReadResult reads the experiment result
ReadResult() (*ExperimentResult, error)
// WriteResult writes the experiment result
WriteResult(r *ExperimentResult) error
}
Driver enables interacting with the experiment spec and result stored externally
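A minimal sketch of a file-based driver, assuming the spec and result are stored as local JSON files and that the package import path is github.com/iter8-tools/iter8/base; the fileDriver type and the file names are illustrative, not part of this package:

package main

import (
    "encoding/json"
    "os"

    "github.com/iter8-tools/iter8/base"
)

// fileDriver is a hypothetical Driver that reads and writes local JSON files
type fileDriver struct {
    specPath   string
    resultPath string
}

func (d *fileDriver) ReadSpec() (base.ExperimentSpec, error) {
    b, err := os.ReadFile(d.specPath)
    if err != nil {
        return nil, err
    }
    var spec base.ExperimentSpec
    // ExperimentSpec provides a custom JSON unmarshaler (see UnmarshalJSON below)
    err = json.Unmarshal(b, &spec)
    return spec, err
}

func (d *fileDriver) ReadResult() (*base.ExperimentResult, error) {
    b, err := os.ReadFile(d.resultPath)
    if err != nil {
        return nil, err
    }
    res := &base.ExperimentResult{}
    err = json.Unmarshal(b, res)
    return res, err
}

func (d *fileDriver) WriteResult(r *base.ExperimentResult) error {
    b, err := json.Marshal(r)
    if err != nil {
        return err
    }
    return os.WriteFile(d.resultPath, b, 0644)
}

func main() {
    driver := &fileDriver{specPath: "experiment.json", resultPath: "result.json"}
    // RunExperiment reads the spec through the driver, runs its tasks, and writes the result back
    if err := base.RunExperiment(driver); err != nil {
        // handle the experiment failure
    }
}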
type Experiment ¶
type Experiment struct {
// Tasks is the sequence of tasks that constitute this experiment
Tasks ExperimentSpec
// Result is the current results from this experiment.
// The experiment may not have completed in which case results may be partial.
Result *ExperimentResult
}
Experiment struct containing spec and result
func BuildExperiment ¶ added in v0.9.3
func BuildExperiment(withResult bool, driver Driver) (*Experiment, error)
BuildExperiment builds an experiment
func (*Experiment) Completed ¶ added in v0.9.3
func (exp *Experiment) Completed() bool
Completed returns true if the experiment is complete
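A small sketch, reusing the hypothetical driver from the Driver sketch above:

// build the experiment, reading both its spec and its result (withResult = true)
exp, err := base.BuildExperiment(true, driver)
if err != nil {
    // handle the error
}
if exp.Completed() {
    // the experiment is complete
}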
type ExperimentResult ¶
type ExperimentResult struct {
// StartTime is the time when the experiment run started
StartTime time.Time `json:"startTime" yaml:"startTime"`
// NumCompletedTasks is the number of completed tasks
NumCompletedTasks int `json:"numCompletedTasks" yaml:"numCompletedTasks"`
// Failure is true if any of its tasks failed
Failure bool `json:"failure" yaml:"failure"`
// Insights produced in this experiment
Insights *Insights `json:"insights,omitempty" yaml:"insights,omitempty"`
// Iter8Version is the version of Iter8 CLI that created this result object
Iter8Version string `json:"iter8Version" yaml:"iter8Version"`
}
ExperimentResult defines the current results from the experiment
type ExperimentSpec ¶ added in v0.8.29
type ExperimentSpec []Task
ExperimentSpec specifies the set of tasks in this experiment
func (*ExperimentSpec) UnmarshalJSON ¶ added in v0.8.29
func (s *ExperimentSpec) UnmarshalJSON(data []byte) error
UnmarshalJSON unmarshals an experiment spec from bytes. This is a custom JSON unmarshaler.
type HistBucket ¶ added in v0.8.29
type HistBucket struct {
// Lower endpoint of a histogram bucket
Lower float64 `json:"lower" yaml:"lower"`
// Upper endpoint of a histogram bucket
Upper float64 `json:"upper" yaml:"upper"`
// Count is the frequency count of the bucket
Count uint64 `json:"count" yaml:"count"`
}
HistBucket is a single bucket in a histogram
type Insights ¶
type Insights struct {
// NumVersions is the number of app versions detected by Iter8
NumVersions int `json:"numVersions" yaml:"numVersions"`
// MetricsInfo identifies the metrics involved in this experiment
MetricsInfo map[string]MetricMeta `json:"metricsInfo,omitempty" yaml:"metricsInfo,omitempty"`
// NonHistMetricValues:
// the outer slice must be the same length as the number of app versions
// the map key must match the name of a metric in MetricsInfo
// the inner slice contains the list of all observed metric values for given version and given metric; float value [i]["foo/bar"][k] is the [k]th observation for version [i] for the metric bar under backend foo.
// this struct is meant exclusively for metrics of type other than histogram
NonHistMetricValues []map[string][]float64 `json:"nonHistMetricValues,omitempty" yaml:"nonHistMetricValues,omitempty"`
// HistMetricValues:
// the outer slice must be the same length as the number of app versions
// the map key must match the name of a histogram metric in MetricsInfo
// the inner slice contains the list of all observed histogram buckets for a given version and given metric; value [i]["foo/bar"][k] is the [k]th observed bucket for version [i] for the hist metric `bar` under backend `foo`.
HistMetricValues []map[string][]HistBucket `json:"histMetricValues,omitempty" yaml:"histMetricValues,omitempty"`
// SLOs involved in this experiment
SLOs []SLO `json:"SLOs,omitempty" yaml:"SLOs,omitempty"`
// SLOsSatisfied:
// the outer slice must be of the same length as SLOs
// the length of the inner slice must be the number of app versions
// the boolean value at [i][j] indicates if SLO [i] is satisfied by version [j]
SLOsSatisfied [][]bool `json:"SLOsSatisfied,omitempty" yaml:"SLOsSatisfied,omitempty"`
}
Insights records the number of versions in this experiment, metric values and SLO indicators for each version, metrics metadata for all metrics, and SLO definitions for all SLOs
func (*Insights) GetMetricsInfo ¶ added in v0.8.30
func (in *Insights) GetMetricsInfo(nm string) (*MetricMeta, error)
GetMetricsInfo gets metric meta for the given normalized metric name
func (*Insights) ScalarMetricValue ¶ added in v0.8.29
func (in *Insights) ScalarMetricValue(i int, m string) *float64
ScalarMetricValue gets the value of the given scalar metric for the given version
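A sketch of reading insights, following the indexing conventions described above; the metric name is illustrative, exp is the experiment from the sketches above, and fmt is assumed to be imported:

in := exp.Result.Insights
for i := 0; i < in.NumVersions; i++ {
    // scalar value of a non-histogram metric for version i, if observed
    if v := in.ScalarMetricValue(i, "http/latency-mean"); v != nil {
        fmt.Printf("version %d: http/latency-mean = %f\n", i, *v)
    }
}
// SLOsSatisfied[i][j] reports whether SLO i is satisfied by version j
for i, slo := range in.SLOs {
    for j, ok := range in.SLOsSatisfied[i] {
        fmt.Printf("SLO on %s, version %d: satisfied = %t\n", slo.Metric, j, ok)
    }
}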
type Metric ¶ added in v0.9.3
type Metric struct {
// Name is the name of the metric
Name string `json:"name" yaml:"name"`
// Description is a human readable description of the metric
Description *string `json:"description,omitempty" yaml:"description,omitempty"`
// Type of the metric. Example: counter
Type string `json:"type" yaml:"type"`
// Units of the metric (if any)
Units *string `json:"units,omitempty" yaml:"units,omitempty"`
// Params are the HTTP query parameters used when querying for this metric
Params *[]Params `json:"params,omitempty" yaml:"params,omitempty"`
// Body is the HTTP request body used when querying for this metric
Body *string `json:"body,omitempty" yaml:"body,omitempty"`
// JqExpression is the jq expression used to extract the metric value from the response
JqExpression string `json:"jqExpression" yaml:"jqExpression"`
}
type MetricMeta ¶
type MetricMeta struct {
// Description is a human readable description of the metric
Description string `json:"description" yaml:"description"`
// Units for this metric (if any)
Units *string `json:"units,omitempty" yaml:"units,omitempty"`
// Type of the metric. Example: counter
Type MetricType `json:"type" yaml:"type"`
}
MetricMeta describes a metric
type Params ¶ added in v0.9.3
type Params struct {
// Name is the name of the parameter
Name string `json:"name" yaml:"name"`
// Value is the value of the parameter
Value string `json:"value" yaml:"value"`
}
type SLO ¶
type SLO struct {
// Metric is the fully qualified metric name in the backendName/metricName format
Metric string `json:"metric" yaml:"metric"`
// UpperLimit is the maximum acceptable value of the metric
UpperLimit *float64 `json:"upperLimit,omitempty" yaml:"upperLimit,omitempty"`
// LowerLimit is the minimum acceptable value of the metric
LowerLimit *float64 `json:"lowerLimit,omitempty" yaml:"lowerLimit,omitempty"`
}
SLO is a service level objective
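A short construction sketch; the metric name and the limit are illustrative:

limit := 250.0 // maximum acceptable value for the metric
slo := base.SLO{
    Metric:     "http/latency-mean",
    UpperLimit: &limit,
}
_ = slo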
type Task ¶
type Task interface {
// contains filtered or unexported methods
}
Task is the building block of an experiment spec. An experiment spec is a sequence of tasks.
type TaskMeta ¶ added in v0.8.29
type TaskMeta struct {
// Task is the name of the task
Task *string `json:"task,omitempty" yaml:"task,omitempty"`
// Run is the script used in a run task
// Specify either Task or Run but not both
Run *string `json:"run,omitempty" yaml:"run,omitempty"`
// If is the condition used to determine if this task needs to run
// If the condition is not satisfied, then it is skipped in an experiment
// Example: SLOs()
If *string `json:"if,omitempty" yaml:"if,omitempty"`
}
TaskMeta provides common fields used across all tasks
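A construction sketch showing the two mutually exclusive ways of filling TaskMeta; the script and the condition are illustrative:

// a task referenced by name
collect := base.TaskMeta{Task: base.StringPointer(base.CollectHTTPTaskName)}

// a run task guarded by a condition; the task is skipped unless the condition holds
notify := base.TaskMeta{
    Run: base.StringPointer("echo all SLOs are satisfied"),
    If:  base.StringPointer("SLOs()"),
}
_, _ = collect, notify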