Documentation
¶
Index ¶
- Variables
- func NewTSDBSeriesIter(ctx context.Context, user string, f sharding.ForSeries, ...) (v1.Iterator[*v1.Series], error)
- type BloomGenerator
- type BloomTSDBStore
- func (b *BloomTSDBStore) LoadTSDB(ctx context.Context, table config.DayTable, tenant string, id tsdb.Identifier, ...) (v1.Iterator[*v1.Series], error)
- func (b *BloomTSDBStore) ResolveTSDBs(ctx context.Context, table config.DayTable, tenant string) ([]tsdb.SingleTenantTSDBIdentifier, error)
- func (b *BloomTSDBStore) UsersForPeriod(ctx context.Context, table config.DayTable) ([]string, error)
- type ChunkItersByFingerprint
- type ChunkLoader
- type Compactor
- type Config
- type FetchFunc
- type Fetcher
- type Keyspace
- type LazyBlockBuilderIterator
- type Limits
- type Metrics
- type RetentionConfig
- type RetentionLimits
- type RetentionManager
- type SimpleBloomController
- type SimpleBloomGenerator
- type StoreChunkLoader
- type TSDBStore
- type TSDBStores
- func (s *TSDBStores) LoadTSDB(ctx context.Context, table config.DayTable, tenant string, id tsdb.Identifier, ...) (v1.Iterator[*v1.Series], error)
- func (s *TSDBStores) ResolveTSDBs(ctx context.Context, table config.DayTable, tenant string) ([]tsdb.SingleTenantTSDBIdentifier, error)
- func (s *TSDBStores) UsersForPeriod(ctx context.Context, table config.DayTable) ([]string, error)
Constants ¶
This section is empty.
Variables ¶
var (
RingOp = ring.NewOp([]ring.InstanceState{ring.JOINING, ring.ACTIVE}, nil)
)
Functions ¶
Types ¶
type BloomGenerator ¶
type BloomGenerator interface {
Generate(ctx context.Context) (skippedBlocks []v1.BlockMetadata, toClose []io.Closer, results v1.Iterator[*v1.Block], err error)
}
A Store is expected to be bound within the generator implementation. This allows specifying implementations such as ShardedStore&lt;Store&gt; that request only the shard-range needed from the existing store.
type BloomTSDBStore ¶
type BloomTSDBStore struct {
// contains filtered or unexported fields
}
BloomTSDBStore is a wrapper around the storage.Client interface that implements the TSDBStore interface for this package.
func NewBloomTSDBStore ¶
func NewBloomTSDBStore(storage storage.Client, logger log.Logger) *BloomTSDBStore
func (*BloomTSDBStore) LoadTSDB ¶
func (b *BloomTSDBStore) LoadTSDB( ctx context.Context, table config.DayTable, tenant string, id tsdb.Identifier, bounds v1.FingerprintBounds, ) (v1.Iterator[*v1.Series], error)
func (*BloomTSDBStore) ResolveTSDBs ¶
func (b *BloomTSDBStore) ResolveTSDBs(ctx context.Context, table config.DayTable, tenant string) ([]tsdb.SingleTenantTSDBIdentifier, error)
func (*BloomTSDBStore) UsersForPeriod ¶
func (b *BloomTSDBStore) UsersForPeriod(ctx context.Context, table config.DayTable) ([]string, error)
type ChunkItersByFingerprint ¶
type ChunkItersByFingerprint struct {
// contains filtered or unexported fields
}
ChunkItersByFingerprint models the chunks belonging to a fingerprint.
type ChunkLoader ¶
type ChunkLoader interface {
Load(ctx context.Context, userID string, series *v1.Series) (*ChunkItersByFingerprint, error)
}
ChunkLoader loads chunks from a store.
type Compactor ¶
Bloom-compactor
This is a standalone service that is responsible for compacting TSDB indexes into bloom filters. It creates and merges bloom filters into an aggregated form, called bloom-blocks. It maintains a list of references between bloom-blocks and TSDB indexes in files called meta.jsons.
Bloom-compactor regularly runs to check for changes in meta.jsons and runs compaction only upon changes in TSDBs.
func New ¶
func New( cfg Config, schemaCfg config.SchemaConfig, storeCfg storage.Config, clientMetrics storage.ClientMetrics, fetcherProvider stores.ChunkFetcherProvider, ring ring.ReadRing, ringLifeCycler *ring.BasicLifecycler, limits Limits, store bloomshipper.StoreWithMetrics, logger log.Logger, r prometheus.Registerer, ) (*Compactor, error)
type Config ¶
type Config struct { // Ring configures the ring store used to save and retrieve the different Bloom-Compactor instances. // In case it isn't explicitly set, it follows the same behavior of the other rings (ex: using the common configuration // section and the ingester configuration by default). Ring ring.RingConfig `` /* 205-byte string literal not displayed */ // Enabled configures whether bloom-compactors should be used to compact index values into bloomfilters Enabled bool `yaml:"enabled"` CompactionInterval time.Duration `yaml:"compaction_interval"` MinTableOffset int `yaml:"min_table_offset"` MaxTableOffset int `yaml:"max_table_offset"` WorkerParallelism int `yaml:"worker_parallelism"` RetryMinBackoff time.Duration `yaml:"compaction_retries_min_backoff"` RetryMaxBackoff time.Duration `yaml:"compaction_retries_max_backoff"` CompactionRetries int `yaml:"compaction_retries"` MaxCompactionParallelism int `yaml:"max_compaction_parallelism"` RetentionConfig RetentionConfig `yaml:"retention"` }
Config configures the bloom-compactor component.
func (*Config) RegisterFlags ¶
func (cfg *Config) RegisterFlags(f *flag.FlagSet)
RegisterFlags registers flags for the Bloom-Compactor configuration.
type LazyBlockBuilderIterator ¶
type LazyBlockBuilderIterator struct {
// contains filtered or unexported fields
}
LazyBlockBuilderIterator is a lazy iterator over blocks that builds each block by adding series to them until they are full.
func NewLazyBlockBuilderIterator ¶
func NewLazyBlockBuilderIterator( ctx context.Context, opts v1.BlockOptions, metrics *Metrics, populate func(*v1.Series, *v1.Bloom) (int, error), readWriterFn func() (v1.BlockWriter, v1.BlockReader), series v1.PeekingIterator[*v1.Series], blocks v1.ResettableIterator[*v1.SeriesWithBloom], ) *LazyBlockBuilderIterator
func (*LazyBlockBuilderIterator) At ¶
func (b *LazyBlockBuilderIterator) At() *v1.Block
func (*LazyBlockBuilderIterator) Bytes ¶
func (b *LazyBlockBuilderIterator) Bytes() (bytes int)
func (*LazyBlockBuilderIterator) Err ¶
func (b *LazyBlockBuilderIterator) Err() error
func (*LazyBlockBuilderIterator) Next ¶
func (b *LazyBlockBuilderIterator) Next() bool
type Limits ¶
type Limits interface { RetentionLimits BloomCompactorShardSize(tenantID string) int BloomCompactorEnabled(tenantID string) bool BloomNGramLength(tenantID string) int BloomNGramSkip(tenantID string) int BloomFalsePositiveRate(tenantID string) float64 BloomCompactorMaxBlockSize(tenantID string) int BloomBlockEncoding(tenantID string) string }
type Metrics ¶
type Metrics struct {
// contains filtered or unexported fields
}
func NewMetrics ¶
func NewMetrics(r prometheus.Registerer, bloomMetrics *v1.Metrics) *Metrics
type RetentionConfig ¶
type RetentionConfig struct { Enabled bool `yaml:"enabled"` MaxLookbackDays int `yaml:"max_lookback_days"` }
func (*RetentionConfig) RegisterFlags ¶
func (cfg *RetentionConfig) RegisterFlags(f *flag.FlagSet)
func (*RetentionConfig) Validate ¶
func (cfg *RetentionConfig) Validate() error
type RetentionLimits ¶
type RetentionLimits interface { RetentionPeriod(userID string) time.Duration StreamRetention(userID string) []validation.StreamRetention AllByUserID() map[string]*validation.Limits DefaultLimits() *validation.Limits }
type RetentionManager ¶
type RetentionManager struct {
// contains filtered or unexported fields
}
func NewRetentionManager ¶
func NewRetentionManager( cfg RetentionConfig, limits RetentionLimits, bloomStore bloomshipper.Store, sharding retentionSharding, metrics *Metrics, logger log.Logger, ) *RetentionManager
type SimpleBloomController ¶
type SimpleBloomController struct {
// contains filtered or unexported fields
}
func NewSimpleBloomController ¶
func NewSimpleBloomController( tsdbStore TSDBStore, blockStore bloomshipper.Store, chunkLoader ChunkLoader, limits Limits, metrics *Metrics, logger log.Logger, ) *SimpleBloomController
type SimpleBloomGenerator ¶
type SimpleBloomGenerator struct {
// contains filtered or unexported fields
}
Simple implementation of a BloomGenerator.
func NewSimpleBloomGenerator ¶
func NewSimpleBloomGenerator( userID string, opts v1.BlockOptions, store v1.Iterator[*v1.Series], chunkLoader ChunkLoader, blocksIter v1.ResettableIterator[*v1.SeriesWithBloom], readWriterFn func() (v1.BlockWriter, v1.BlockReader), reporter func(model.Fingerprint), metrics *Metrics, logger log.Logger, ) *SimpleBloomGenerator
SimpleBloomGenerator is a foundational implementation of BloomGenerator. It mainly wires up a few different components to generate bloom filters for a set of blocks and handles schema compatibility: blocks that are incompatible with the schema are skipped and will have their chunks reindexed.
func (*SimpleBloomGenerator) Generate ¶
func (s *SimpleBloomGenerator) Generate(ctx context.Context) *LazyBlockBuilderIterator
type StoreChunkLoader ¶
type StoreChunkLoader struct {
// contains filtered or unexported fields
}
StoreChunkLoader loads chunks from a store.
func NewStoreChunkLoader ¶
func NewStoreChunkLoader(fetcherProvider stores.ChunkFetcherProvider, metrics *Metrics) *StoreChunkLoader
func (*StoreChunkLoader) Load ¶
func (s *StoreChunkLoader) Load(ctx context.Context, userID string, series *v1.Series) (*ChunkItersByFingerprint, error)
type TSDBStore ¶
type TSDBStore interface { UsersForPeriod(ctx context.Context, table config.DayTable) ([]string, error) ResolveTSDBs(ctx context.Context, table config.DayTable, tenant string) ([]tsdb.SingleTenantTSDBIdentifier, error) LoadTSDB( ctx context.Context, table config.DayTable, tenant string, id tsdb.Identifier, bounds v1.FingerprintBounds, ) (v1.Iterator[*v1.Series], error) }
type TSDBStores ¶
type TSDBStores struct {
// contains filtered or unexported fields
}
func NewTSDBStores ¶
func NewTSDBStores( schemaCfg config.SchemaConfig, storeCfg baseStore.Config, clientMetrics baseStore.ClientMetrics, logger log.Logger, ) (*TSDBStores, error)
func (*TSDBStores) LoadTSDB ¶
func (s *TSDBStores) LoadTSDB( ctx context.Context, table config.DayTable, tenant string, id tsdb.Identifier, bounds v1.FingerprintBounds, ) (v1.Iterator[*v1.Series], error)
func (*TSDBStores) ResolveTSDBs ¶
func (s *TSDBStores) ResolveTSDBs(ctx context.Context, table config.DayTable, tenant string) ([]tsdb.SingleTenantTSDBIdentifier, error)